From a24ce04418992696c69bad7e10760b1f09805f4d Mon Sep 17 00:00:00 2001 From: kaleofduty <59616916+kaleofduty@users.noreply.github.com> Date: Fri, 10 Oct 2025 17:31:24 +0200 Subject: [PATCH] Improvements: - OCR3.1 progress - Minor fixes based on b7513f300c6593a015e560a4ceddc8fd89e234ed --- README.md | 2 +- go.mod | 29 +- go.sum | 55 +- internal/jmt/binary_tree.go | 114 + internal/jmt/digest.go | 39 + internal/jmt/hex_tree.go | 115 + internal/jmt/jmt.go | 797 +++++++ internal/jmt/nibbles.go | 101 + internal/jmt/persistence.go | 44 + internal/jmt/verify.go | 320 +++ internal/loghelper/taper.go | 22 +- internal/randmap/randmap.go | 74 + internal/ringbuffer/ringbuffer.go | 14 + internal/singlewriter/conflict_tracker.go | 43 + internal/singlewriter/overlay_transaction.go | 322 +++ .../singlewriter/serialized_transaction.go | 79 + .../singlewriter/unserialized_transaction.go | 60 + .../internal/ocrendpointv3/types/types.go | 31 - networking/ocr_endpoint_v2.go | 12 +- networking/ocr_endpoint_v3.go | 335 +-- networking/peer_group.go | 14 +- networking/peer_v2.go | 55 +- networking/ragedisco/ragep2p_discoverer.go | 13 +- networking/ragep2pwrapper/wrapper.go | 32 + networking/rageping/service.go | 20 +- networking/rageping/types.go | 4 +- .../internal/managed/limits/ocr3_1_limits.go | 182 +- .../internal/managed/managed_ocr3_1_oracle.go | 9 +- .../protocol/outcome_generation_follower.go | 3 +- .../internal/ocr3/protocol/signed_data.go | 6 +- .../ocr3_1/blobtypes/serialization.go | 86 + .../offchainreporting3_1_blobs.pb.go | 268 +++ .../blobtypes/serialization/serialization.go | 41 + .../internal/ocr3_1/blobtypes/types.go | 33 +- .../internal/ocr3_1/protocol/blob_endpoint.go | 155 +- .../internal/ocr3_1/protocol/blob_exchange.go | 1558 ++++++++++---- .../internal/ocr3_1/protocol/blob_reap.go | 121 ++ .../internal/ocr3_1/protocol/db.go | 10 - .../internal/ocr3_1/protocol/event.go | 100 +- .../internal/ocr3_1/protocol/kv.go | 89 - .../internal/ocr3_1/protocol/kvdb.go | 181 ++ 
.../internal/ocr3_1/protocol/message.go | 222 +- .../internal/ocr3_1/protocol/oracle.go | 89 +- .../ocr3_1/protocol/outcome_generation.go | 15 +- .../protocol/outcome_generation_follower.go | 431 ++-- .../protocol/outcome_generation_leader.go | 68 +- .../internal/ocr3_1/protocol/queue/queue.go | 68 - .../ocr3_1/protocol/report_attestation.go | 12 +- .../requestergadget/requester_gadget.go | 261 +++ .../protocol/requestergadget/shuffle.go | 52 + .../internal/ocr3_1/protocol/signed_data.go | 161 +- .../protocol/state_block_synchronization.go | 367 ---- .../ocr3_1/protocol/state_persistence.go | 378 ---- .../internal/ocr3_1/protocol/state_sync.go | 521 +++++ .../ocr3_1/protocol/state_sync_block.go | 385 ++++ .../protocol/state_sync_block_replay.go | 139 ++ .../protocol/state_sync_destroy_if_needed.go | 120 ++ .../ocr3_1/protocol/state_sync_reap.go | 216 ++ .../ocr3_1/protocol/state_sync_snapshot.go | 52 + .../ocr3_1/protocol/state_sync_tree.go | 589 ++++++ .../ocr3_1/protocol/state_sync_tree_ranges.go | 94 + .../protocol/state_tree_synchronization.go | 8 - .../internal/ocr3_1/protocol/types.go | 40 +- .../offchainreporting3_1_db.pb.go | 344 ++- .../offchainreporting3_1_jmt.pb.go | 444 ++++ .../offchainreporting3_1_messages.pb.go | 1868 +++++++++++------ .../ocr3_1/serialization/serialization.go | 438 +++- .../ocr3_1/serialization/serialization_jmt.go | 204 ++ .../internal/shim/metrics.go | 51 + .../internal/shim/ocr3_1_database.go | 54 - .../internal/shim/ocr3_1_key_value_store.go | 944 ++++++++- .../internal/shim/ocr3_1_reporting_plugin.go | 26 +- .../shim/ocr3_1_serializing_endpoint.go | 83 +- .../badger_key_value_database.go | 180 -- .../pebble_key_value_database.go | 260 +++ offchainreporting2plus/ocr3_1types/db.go | 26 +- .../ocr3_1types/{kv.go => kvdb.go} | 33 +- offchainreporting2plus/ocr3_1types/plugin.go | 234 ++- offchainreporting2plus/types/types.go | 6 + ragep2p/ragep2p.go | 18 +- ragep2p/ragep2pnew/doc.go | 127 ++ 
.../ragep2pnew/internal/demuxer/demuxer.go | 277 +++ ragep2p/ragep2pnew/internal/frame/frame.go | 321 +++ .../internal/internaltypes/internaltypes.go | 54 + ragep2p/ragep2pnew/internal/muxer/muxer.go | 272 +++ .../overheadawareconn/overhead_aware_conn.go | 111 + .../internal/ratelimit/ratelimit.go | 115 + .../ratelimitaggregator/aggregator.go | 94 + .../internal}/responselimit/checker.go | 20 +- .../internal}/responselimit/policies.go | 8 +- .../internal/stream2types/stream2types.go | 129 ++ ragep2p/ragep2pnew/loggers/logrus.go | 44 + ragep2p/ragep2pnew/metrics.go | 216 ++ ragep2p/ragep2pnew/ragep2p.go | 1599 ++++++++++++++ ragep2p/ragep2pnew/stream.go | 95 + ragep2p/ragep2pnew/stream2.go | 249 +++ ragep2p/ragep2pnew/tls_config.go | 22 + ragep2p/ragep2pnew/wrapper.go | 74 + ragep2p/types/types.go | 13 + ragep2p/wrapper.go | 74 + 100 files changed, 15442 insertions(+), 3561 deletions(-) create mode 100644 internal/jmt/binary_tree.go create mode 100644 internal/jmt/digest.go create mode 100644 internal/jmt/hex_tree.go create mode 100644 internal/jmt/jmt.go create mode 100644 internal/jmt/nibbles.go create mode 100644 internal/jmt/persistence.go create mode 100644 internal/jmt/verify.go create mode 100644 internal/randmap/randmap.go create mode 100644 internal/singlewriter/conflict_tracker.go create mode 100644 internal/singlewriter/overlay_transaction.go create mode 100644 internal/singlewriter/serialized_transaction.go create mode 100644 internal/singlewriter/unserialized_transaction.go delete mode 100644 networking/internal/ocrendpointv3/types/types.go create mode 100644 networking/ragep2pwrapper/wrapper.go create mode 100644 offchainreporting2plus/internal/ocr3_1/blobtypes/serialization.go create mode 100644 offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/offchainreporting3_1_blobs.pb.go create mode 100644 offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/serialization.go create mode 100644 
offchainreporting2plus/internal/ocr3_1/protocol/blob_reap.go delete mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/kv.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/kvdb.go delete mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/queue/queue.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/requester_gadget.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/shuffle.go delete mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_block_synchronization.go delete mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_persistence.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block_replay.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_destroy_if_needed.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_reap.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_snapshot.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree.go create mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree_ranges.go delete mode 100644 offchainreporting2plus/internal/ocr3_1/protocol/state_tree_synchronization.go create mode 100644 offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_jmt.pb.go create mode 100644 offchainreporting2plus/internal/ocr3_1/serialization/serialization_jmt.go delete mode 100644 offchainreporting2plus/keyvaluedatabase/badger_key_value_database.go create mode 100644 offchainreporting2plus/keyvaluedatabase/pebble_key_value_database.go rename offchainreporting2plus/ocr3_1types/{kv.go => kvdb.go} (65%) create mode 100644 ragep2p/ragep2pnew/doc.go create mode 100644 
ragep2p/ragep2pnew/internal/demuxer/demuxer.go create mode 100644 ragep2p/ragep2pnew/internal/frame/frame.go create mode 100644 ragep2p/ragep2pnew/internal/internaltypes/internaltypes.go create mode 100644 ragep2p/ragep2pnew/internal/muxer/muxer.go create mode 100644 ragep2p/ragep2pnew/internal/overheadawareconn/overhead_aware_conn.go create mode 100644 ragep2p/ragep2pnew/internal/ratelimit/ratelimit.go create mode 100644 ragep2p/ragep2pnew/internal/ratelimitaggregator/aggregator.go rename {networking/internal/ocrendpointv3 => ragep2p/ragep2pnew/internal}/responselimit/checker.go (88%) rename {networking/internal/ocrendpointv3 => ragep2p/ragep2pnew/internal}/responselimit/policies.go (88%) create mode 100644 ragep2p/ragep2pnew/internal/stream2types/stream2types.go create mode 100644 ragep2p/ragep2pnew/loggers/logrus.go create mode 100644 ragep2p/ragep2pnew/metrics.go create mode 100644 ragep2p/ragep2pnew/ragep2p.go create mode 100644 ragep2p/ragep2pnew/stream.go create mode 100644 ragep2p/ragep2pnew/stream2.go create mode 100644 ragep2p/ragep2pnew/tls_config.go create mode 100644 ragep2p/ragep2pnew/wrapper.go create mode 100644 ragep2p/wrapper.go diff --git a/README.md b/README.md index 277dad01..7a17e9cc 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ Please see the whitepapers available at https://chainlinklabs.com/research for d - OCR1 is deprecated and being phased out. - OCR2 & OCR3 are in production. -- OCR3.1 is in alpha and excluded from any bug bounties at this time. So is the associated Key-Value-Database in `offchainreporting2plus/keyvaluedatabase/`. +- OCR3.1 is in alpha and excluded from any bug bounties at this time. So are the associated Key-Value-Database in `offchainreporting2plus/keyvaluedatabase/` and helpers in `internal/jmt`, `internal/randmap`, `internal/singlewriter`. 
## Organization ``` diff --git a/go.mod b/go.mod index 348d5aee..061de091 100644 --- a/go.mod +++ b/go.mod @@ -5,14 +5,15 @@ go 1.24 toolchain go1.24.4 require ( - github.com/dgraph-io/badger/v4 v4.7.0 + github.com/cockroachdb/pebble v1.1.2 github.com/ethereum/go-ethereum v1.15.3 + github.com/google/btree v1.1.3 github.com/leanovate/gopter v0.2.11 github.com/mr-tron/base58 v1.2.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/sirupsen/logrus v1.9.3 - golang.org/x/crypto v0.36.0 + golang.org/x/crypto v0.41.0 google.golang.org/protobuf v1.36.6 ) @@ -29,7 +30,6 @@ require ( github.com/cockroachdb/errors v1.11.3 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v1.1.2 // indirect github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/consensys/bavard v0.1.22 // indirect @@ -41,23 +41,19 @@ require ( github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect - github.com/dgraph-io/ristretto/v2 v2.2.0 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect github.com/ethereum/c-kzg-4844 v1.0.3 // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/ferranbt/fastssz v0.1.2 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/getsentry/sentry-go v0.27.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.1 // indirect github.com/golang/protobuf v1.5.4 // indirect 
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/flatbuffers v25.2.10+incompatible // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.4.2 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect @@ -100,22 +96,21 @@ require ( github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect + github.com/smartcontractkit/go-sumtype2 v0.0.0-20250903174514-31585731b5a3 // indirect github.com/supranational/blst v0.3.14 // indirect github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/urfave/cli/v2 v2.27.5 // indirect github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/metric v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f // indirect - golang.org/x/net v0.38.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.36.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -123,3 +118,5 @@ require ( ) replace github.com/leanovate/gopter => github.com/leanovate/gopter v0.2.10-0.20210127095200-9abe2343507a + +tool github.com/smartcontractkit/go-sumtype2 diff --git 
a/go.sum b/go.sum index 78a1ff4c..8c571ddf 100644 --- a/go.sum +++ b/go.sum @@ -56,15 +56,7 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3 github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= -github.com/dgraph-io/badger/v4 v4.7.0 h1:Q+J8HApYAY7UMpL8d9owqiB+odzEc0zn/aqOD9jhc6Y= -github.com/dgraph-io/badger/v4 v4.7.0/go.mod h1:He7TzG3YBy3j4f5baj5B7Zl2XyfNe5bl4Udl0aPemVA= -github.com/dgraph-io/ristretto/v2 v2.2.0 h1:bkY3XzJcXoMuELV8F+vS8kzNgicwQFAaGINAEJdWGOM= -github.com/dgraph-io/ristretto/v2 v2.2.0/go.mod h1:RZrm63UmcBAaYWC1DotLYBmTvgkrs0+XhBd7Npn7/zI= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da h1:aIftn67I1fkbMa512G+w+Pxci9hJPB8oMnkcP3iZF38= -github.com/dgryski/go-farm v0.0.0-20240924180020-3414d57e47da/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ethereum/c-kzg-4844 v1.0.3 h1:IEnbOHwjixW2cTvKRUlAAUOeleV7nNM/umJR+qy4WDs= github.com/ethereum/c-kzg-4844 v1.0.3/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= github.com/ethereum/go-ethereum v1.15.3 h1:OeTWAq6r8iR89bfJDjmmOemE74ywArl9DUViFsVj3Y8= @@ -87,11 +79,6 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -117,8 +104,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= -github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= -github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -264,6 +251,8 @@ github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible h1 github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/sirupsen/logrus v1.9.3 
h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smartcontractkit/go-sumtype2 v0.0.0-20250903174514-31585731b5a3 h1:8DQfNbAPbN749k+XRi1tK+Ofur5eq0VHDypnwipqluw= +github.com/smartcontractkit/go-sumtype2 v0.0.0-20250903174514-31585731b5a3/go.mod h1:z89MmYNUz23UMWPgHr/26uYyeWJjTtlDvLS+xOS3sAs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -275,8 +264,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY+qRo= github.com/supranational/blst v0.3.14/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= @@ -295,14 +284,6 @@ github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBi github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -311,14 +292,16 @@ golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod 
h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -335,8 +318,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -345,8 +328,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync 
v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -379,8 +362,8 @@ golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -396,8 +379,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= 
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= @@ -409,6 +392,8 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/jmt/binary_tree.go b/internal/jmt/binary_tree.go new file mode 100644 index 00000000..807c0dd6 --- /dev/null +++ b/internal/jmt/binary_tree.go @@ -0,0 +1,114 @@ +package jmt + +import ( + "crypto/sha256" + "fmt" +) + +const ( + MaxBinaryTreeDepth = MaxHexTreeDepth * 4 
+ MaxProofLength = MaxBinaryTreeDepth +) + +var SparseMerklePlaceholderDigest = Digest([]byte("SPARSE_MERKLE_PLACEHOLDER_HASH__")) +var LeafDomainSeparator = []byte("JMT::LeafNode") +var InternalDomainSeparator = []byte("JMT::IntrnalNode") // sic + +func digestInternalBinary(leftChildDigest Digest, rightChildDigest Digest) Digest { + hash := sha256.New() + hash.Write(InternalDomainSeparator) + hash.Write(leftChildDigest[:]) + hash.Write(rightChildDigest[:]) + return Digest(hash.Sum(nil)) +} + +func digestLeafBinary(keyDigest Digest, valueDigest Digest) Digest { + hash := sha256.New() + hash.Write(LeafDomainSeparator) + hash.Write(keyDigest[:]) + hash.Write(valueDigest[:]) + return Digest(hash.Sum(nil)) +} + +type sparseMerkleNode struct { + isLeaf bool + digest Digest +} + +func (n sparseMerkleNode) String() string { + if n.isLeaf { + return fmt.Sprintf("Leaf(%x)", n.digest) + } + return fmt.Sprintf("Internal(%x)", n.digest) +} + +func sparseDigestInternalBinary(leftNode sparseMerkleNode, rightNode sparseMerkleNode) sparseMerkleNode { + if leftNode.digest == SparseMerklePlaceholderDigest && rightNode.digest == SparseMerklePlaceholderDigest { + return sparseMerkleNode{false, SparseMerklePlaceholderDigest} + } else if leftNode.isLeaf && rightNode.digest == SparseMerklePlaceholderDigest { + return leftNode + } else if leftNode.digest == SparseMerklePlaceholderDigest && rightNode.isLeaf { + return rightNode + } else { + return sparseMerkleNode{false, digestInternalBinary(leftNode.digest, rightNode.digest)} + } +} + +func evolveLayer(bottomLayer []sparseMerkleNode) []sparseMerkleNode { + if len(bottomLayer)%2 != 0 { + panic("") + } + + nextLayer := make([]sparseMerkleNode, 0, len(bottomLayer)/2) + + for i, j := 0, 1; j < len(bottomLayer); i, j = i+2, j+2 { + nextLayer = append(nextLayer, sparseDigestInternalBinary(bottomLayer[i], bottomLayer[j])) + } + return nextLayer +} + +func sparseMerkleTreeDigest(bottomLayer []sparseMerkleNode) Digest { + if len(bottomLayer) == 0 
{ + return SparseMerklePlaceholderDigest + } + if len(bottomLayer) == 1 { + return bottomLayer[0].digest + } + return sparseMerkleTreeDigest(evolveLayer(bottomLayer)) +} + +// Calling with a compactable i will return invalid proofs, because the node is really not part of the final tree. +func sparseMerkleProof(bottomLayer []sparseMerkleNode, i int) []Digest { + // find the bottommost layer where the node at index i remains after compactions + for len(bottomLayer) > 1 { + bottomLayerCandidate := evolveLayer(bottomLayer) + + if len(bottomLayerCandidate) > i/2 && bottomLayerCandidate[i/2] == bottomLayer[i] { + bottomLayer = bottomLayerCandidate + i = i / 2 + } else { + break + } + } + + if len(bottomLayer) <= 1 { + + return nil + } + + mid := len(bottomLayer) / 2 + + var ( + sibling Digest + descendantSiblings []Digest + ) + if i < mid { + sibling = sparseMerkleTreeDigest(bottomLayer[mid:]) + descendantSiblings = sparseMerkleProof(bottomLayer[:mid], i) + } else { // i >= mid + sibling = sparseMerkleTreeDigest(bottomLayer[:mid]) + descendantSiblings = sparseMerkleProof(bottomLayer[mid:], i-mid) + } + + return append(descendantSiblings, sibling) +} diff --git a/internal/jmt/digest.go b/internal/jmt/digest.go new file mode 100644 index 00000000..60aaf50f --- /dev/null +++ b/internal/jmt/digest.go @@ -0,0 +1,39 @@ +package jmt + +import ( + "bytes" + "crypto/sha256" +) + +type Digest = [sha256.Size]byte + +var ( + MinDigest = Digest{} + MaxDigest = Digest(bytes.Repeat([]byte{0xff}, len(Digest{}))) +) + +func DecrementDigest(digest Digest) (Digest, bool) { + decDigest := digest + for i := len(decDigest) - 1; i >= 0; i-- { + if decDigest[i] == 0 { + decDigest[i] = 0xff + } else { + decDigest[i]-- + return decDigest, true + } + } + return Digest{}, false +} + +func IncrementDigest(digest Digest) (Digest, bool) { + incDigest := digest + for i := len(incDigest) - 1; i >= 0; i-- { + if incDigest[i] == 0xff { + incDigest[i] = 0 + } else { + incDigest[i]++ + return incDigest, 
true + } + } + return Digest{}, false +} diff --git a/internal/jmt/hex_tree.go b/internal/jmt/hex_tree.go new file mode 100644 index 00000000..dc86d9d7 --- /dev/null +++ b/internal/jmt/hex_tree.go @@ -0,0 +1,115 @@ +package jmt + +import ( + "crypto/sha256" + "fmt" +) + +const MaxHexTreeDepth = len(Digest{}) * 2 + +// go-sumtype:decl Node + +type Node interface { + isNode() +} + +type InternalNode struct { + Children [16]*Child +} + +func (n *InternalNode) isNode() {} + +func (n *InternalNode) String() string { + return fmt.Sprintf("InternalNode{children: %v}", n.Children) +} + +type Child struct { + Version Version + Digest Digest + IsLeaf bool +} + +func (c *Child) String() string { + return fmt.Sprintf("Child{version: %d, digest: %x, isLeaf: %t}", c.Version, c.Digest, c.IsLeaf) +} + +type LeafNode struct { + KeyDigest Digest + Key []byte + ValueDigest Digest + Value []byte +} + +func (n *LeafNode) isNode() {} + +func (n *LeafNode) String() string { + return fmt.Sprintf("LeafNode{keyDigest: %x, valueDigest: %x, value: %x}", n.KeyDigest, n.ValueDigest, n.Value) +} + +type NodeKey struct { + Version Version + NibblePath NibblePath +} + +func (nk NodeKey) Equal(nk2 NodeKey) bool { + return nk.Version == nk2.Version && nk.NibblePath.Equal(nk2.NibblePath) +} + +func digestNode(node Node) Digest { + + if node == nil { + return SparseMerklePlaceholderDigest + } + + switch n := node.(type) { + case *InternalNode: + return digestInternalNode(n) + case *LeafNode: + return digestLeafNode(n) + default: + panic("") + } +} + +func hexInternalNodeToBinaryBottomLayer(node *InternalNode) []sparseMerkleNode { + bottomLayer := make([]sparseMerkleNode, len(node.Children)) + for i, child := range node.Children { + if child != nil { + bottomLayer[i] = sparseMerkleNode{ + child.IsLeaf, + child.Digest, + } + } else { + bottomLayer[i] = sparseMerkleNode{ + false, + SparseMerklePlaceholderDigest, + } + } + } + return bottomLayer +} + +func digestInternalNode(node *InternalNode) Digest { + 
return sparseMerkleTreeDigest(hexInternalNodeToBinaryBottomLayer(node)) +} + +func proveInternalNodeChild(node *InternalNode, childIndex int) []Digest { + bottomLayer := hexInternalNodeToBinaryBottomLayer(node) + return sparseMerkleProof(bottomLayer, childIndex) +} + +func digestLeafNode(node *LeafNode) Digest { + return digestLeafBinary(node.KeyDigest, node.ValueDigest) +} + +func DigestKey(key []byte) Digest { + hash := sha256.New() + hash.Write(key) + return Digest(hash.Sum(nil)) +} + +func DigestValue(value []byte) Digest { + hash := sha256.New() + hash.Write(value) + return Digest(hash.Sum(nil)) +} diff --git a/internal/jmt/jmt.go b/internal/jmt/jmt.go new file mode 100644 index 00000000..181a149d --- /dev/null +++ b/internal/jmt/jmt.go @@ -0,0 +1,797 @@ +package jmt + +import ( + "bytes" + "errors" + "fmt" + "math" + "slices" + "sort" +) + +type KeyValue struct { + // Key length must be greater than 0. + Key []byte + // Value of nil is permitted and indicates a desire to delete Key. + Value []byte +} + +type digestedKeyValue struct { + key []byte + keyDigest Digest + value []byte + valueDigest Digest +} + +func (dkv digestedKeyValue) String() string { + return fmt.Sprintf("%x->%x", dkv.keyDigest, dkv.value) +} + +// BatchUpdate performs the updates indicated by keyValueUpdates on top of the +// tree at oldVersion. If oldVersion coincides with newVersion, the updates are +// performed in place and no stale nodes are written. If oldVersion is less than +// newVersion, we perform copy-on-write, and stale nodes might be written that +// might need to be reaped in the future using ReapStaleNodes. Using an +// oldVersion that is greater than newVersion is an error. +// +// keyValueUpdates must be unique in Key. +// +// BatchUpdate returns the NodeKey of the new root node at newVersion. 
+func BatchUpdate( + rootReadWriter RootReadWriter, + nodeReadWriter NodeReadWriter, + staleNodeWriter StaleNodeWriter, + oldVersion Version, + newVersion Version, + keyValueUpdates []KeyValue, +) (NodeKey, error) { + if oldVersion > newVersion { + return NodeKey{}, fmt.Errorf("old version %d is greater than new version %d", oldVersion, newVersion) + } + + oldRootNodeKey, err := rootReadWriter.ReadRoot(oldVersion) + if err != nil { + return NodeKey{}, fmt.Errorf("error reading root node with version %d: %w", oldVersion, err) + } + + oldRootNode, err := nodeReadWriter.ReadNode(oldRootNodeKey) + if err != nil { + return NodeKey{}, fmt.Errorf("error reading root node with node key %v: %w", oldRootNodeKey, err) + } + + digestedInserts := make([]digestedKeyValue, 0, len(keyValueUpdates)) + digestedDeletes := make([]digestedKeyValue, 0, len(keyValueUpdates)) + + { + + seenDigestedKeys := make(map[Digest]struct{}, len(keyValueUpdates)) + + for i, keyValue := range keyValueUpdates { + if len(keyValue.Key) == 0 { + return NodeKey{}, fmt.Errorf("%d-th keyValueUpdate: key is empty", i) + } + + keyDigest := DigestKey(keyValue.Key) + if _, ok := seenDigestedKeys[keyDigest]; ok { + return NodeKey{}, fmt.Errorf("%d-th keyValueUpdate: duplicate key %v in keyValueUpdates", i, keyValue.Key) + } + seenDigestedKeys[keyDigest] = struct{}{} + + var valueDigest Digest + if keyValue.Value != nil { + valueDigest = DigestValue(keyValue.Value) + } + + dkv := digestedKeyValue{ + keyValue.Key, + keyDigest, + keyValue.Value, + valueDigest, + } + + if keyValue.Value == nil { + digestedDeletes = append(digestedDeletes, dkv) + } else { + digestedInserts = append(digestedInserts, dkv) + } + } + } + + // in order of leaf insertion + sort.Slice(digestedInserts, func(i, j int) bool { + return bytes.Compare(digestedInserts[i].keyDigest[:], digestedInserts[j].keyDigest[:]) < 0 + }) + sort.Slice(digestedDeletes, func(i, j int) bool { + return bytes.Compare(digestedDeletes[i].keyDigest[:], 
digestedDeletes[j].keyDigest[:]) < 0 + }) + + newRootNodeKey, _, err := batchUpdate( + nodeReadWriter, + staleNodeWriter, + newVersion, + oldRootNodeKey, + oldRootNode, + digestedInserts, + digestedDeletes, + ) + if err != nil { + return NodeKey{}, fmt.Errorf("error performing batch update %v: %w", keyValueUpdates, err) + } + + err = rootReadWriter.WriteRoot(newVersion, newRootNodeKey) + if err != nil { + return NodeKey{}, fmt.Errorf("error writing root node with node key %v: %w", newRootNodeKey, err) + } + + return newRootNodeKey, nil +} + +func filterSortedDigestedKeyValues( + sortedDigestedKeyValues []digestedKeyValue, + digestPrefix NibblePath, +) []digestedKeyValue { + matchIndex := digestPrefix.NumNibbles() - 1 + if matchIndex < 0 || matchIndex >= len(Digest{})*2 { + panic(fmt.Errorf("match index %v out of bounds", matchIndex)) + } + matchNibble := digestPrefix.Get(matchIndex) + + // assume filterSortedDigestedKeyValues has been recursively called with digestPrefix[:-1] to produce our input sortedDigestedKeyValues + firstMatchingIndex := sort.Search(len(sortedDigestedKeyValues), func(i int) bool { + return NibblePathFromDigest(sortedDigestedKeyValues[i].keyDigest).Get(matchIndex) >= matchNibble + }) + firstNonMatchingIndex := sort.Search(len(sortedDigestedKeyValues), func(i int) bool { + return NibblePathFromDigest(sortedDigestedKeyValues[i].keyDigest).Get(matchIndex) > matchNibble + }) + return sortedDigestedKeyValues[firstMatchingIndex:firstNonMatchingIndex] +} + +func batchUpdate( + nodeReadWriter NodeReadWriter, + staleNodeWriter StaleNodeWriter, + version Version, // version of new or altered nodes + rootNodeKey NodeKey, + rootNode Node, + sortedDigestedInserts []digestedKeyValue, + sortedDigestedDeletes []digestedKeyValue, +) (newRootNodeKey NodeKey, newRootNode Node, err error) { + oldNodeKey := rootNodeKey + oldNodeWasNil := rootNode == nil + replaceRootNode := func(newNodeKey NodeKey, newNode Node) error { + var err error + if !oldNodeWasNil { + // 
there is something to delete + if oldNodeKey.Version < version { + err = errors.Join(err, staleNodeWriter.WriteStaleNode(StaleNode{version, oldNodeKey})) + } else if oldNodeKey.Version == version { + err = errors.Join(err, nodeReadWriter.WriteNode(oldNodeKey, nil)) + } else { + return fmt.Errorf("assumption violation: old node key version %d is greater than new node key version %d", oldNodeKey.Version, version) + } + } + err = errors.Join(err, nodeReadWriter.WriteNode(newNodeKey, newNode)) + return err + } + + if len(sortedDigestedInserts) == 0 && len(sortedDigestedDeletes) == 0 { + return rootNodeKey, rootNode, nil + } + + // len(sortedDigestedInserts) >= 1 or len(sortedDigestedDeletes) >= 1 beyond this point + + if rootNode == nil { + // we can ignore deletions, because we can't delete nodes that don't exist + sortedDigestedDeletes = nil + + if len(sortedDigestedInserts) == 0 { + // nothing to do, nil -> nil + return rootNodeKey, rootNode, nil + } + + if len(sortedDigestedInserts) == 1 { + // nil -> leaf + dkv := sortedDigestedInserts[0] + leafNode := &LeafNode{ + dkv.keyDigest, + dkv.key, + dkv.valueDigest, + dkv.value, + } + leafNodeKey := NodeKey{ + version, + rootNodeKey.NibblePath, + } + err := replaceRootNode(leafNodeKey, leafNode) + if err != nil { + return NodeKey{}, nil, err + } + return leafNodeKey, leafNode, nil + } + + // nil -> internal + // more than one insertions, definitely need an internal node + + rootNode = &InternalNode{} // pretend it's an empty internal node + } + + if rootLeafNode, ok := rootNode.(*LeafNode); ok { + leafNodeKey := NodeKey{ + version, + rootNodeKey.NibblePath, + } + + if len(sortedDigestedInserts) == 1 { + dkv := sortedDigestedInserts[0] + if dkv.keyDigest == rootLeafNode.KeyDigest { + // leaf -> leaf + leafNode := &LeafNode{ + dkv.keyDigest, + dkv.key, + dkv.valueDigest, + dkv.value, + } + err := replaceRootNode(leafNodeKey, leafNode) + if err != nil { + return NodeKey{}, nil, err + } + return leafNodeKey, leafNode, nil + 
} + } + + // could contain many spurious deletes, so search for our leaf key digest + _, deleteRootLeafNode := sort.Find(len(sortedDigestedDeletes), func(i int) int { + return bytes.Compare(rootLeafNode.KeyDigest[:], sortedDigestedDeletes[i].keyDigest[:]) + }) + + if len(sortedDigestedInserts) == 0 { + if deleteRootLeafNode { + // leaf -> nil + err := replaceRootNode(leafNodeKey, nil) + if err != nil { + return NodeKey{}, nil, err + } + return NodeKey{}, nil, nil + } else { + // noop + return rootNodeKey, rootNode, nil + } + } + + // leaf -> internal + insertionPoint := sort.Search(len(sortedDigestedInserts), func(i int) bool { + return bytes.Compare(sortedDigestedInserts[i].keyDigest[:], rootLeafNode.KeyDigest[:]) >= 0 + }) + if !deleteRootLeafNode && (insertionPoint >= len(sortedDigestedInserts) || sortedDigestedInserts[insertionPoint].keyDigest != rootLeafNode.KeyDigest) { + // we didn't already have an update in mind for this leaf key digest + // carry it by inserting (in key digest sorted order) to sortedDigestedInserts + + carryDigestedKeyValue := digestedKeyValue{ + rootLeafNode.Key, + rootLeafNode.KeyDigest, + rootLeafNode.Value, + rootLeafNode.ValueDigest, + } + + sortedDigestedInserts = slices.Clone(sortedDigestedInserts) + sortedDigestedInserts = slices.Insert(sortedDigestedInserts, insertionPoint, carryDigestedKeyValue) + } + rootNode = &InternalNode{} // pretend it's an empty internal node + } + + if rootInternalNode, ok := rootNode.(*InternalNode); ok { + newChildrenNonNilCount := 0 + var ( + newChildrenLastLeafNodeKey NodeKey + newChildrenLastLeaf *LeafNode + ) + + internalNode := &InternalNode{} + + anyChildChanged := false + + for nibble := range 16 { + child := rootInternalNode.Children[nibble] + childNibblePath := rootNodeKey.NibblePath.Append(byte(nibble)) + + var ( + childNodeKey NodeKey + childNode Node + ) + if child == nil { + + childNodeKey = NodeKey{version, childNibblePath} + } else { + childNodeKey = NodeKey{child.Version, 
childNibblePath} + var err error + childNode, err = nodeReadWriter.ReadNode(childNodeKey) + if err != nil { + return NodeKey{}, nil, err + } + } + + sortedDigestedInsertsBelowNibble := filterSortedDigestedKeyValues(sortedDigestedInserts, childNibblePath) + sortedDigestedDeletesBelowNibble := filterSortedDigestedKeyValues(sortedDigestedDeletes, childNibblePath) + newChildNodeKey, newChildNode, err := batchUpdate( + nodeReadWriter, + staleNodeWriter, + version, + childNodeKey, + childNode, + sortedDigestedInsertsBelowNibble, + sortedDigestedDeletesBelowNibble, + ) + + if err != nil { + return NodeKey{}, nil, err + } + + if childNode != newChildNode { + + anyChildChanged = true + } + + if newChildNode == nil { + internalNode.Children[nibble] = nil + // deletion has been taken care of by batchUpdate with the child node as root + } else { + newChildrenNonNilCount++ + isLeaf := false + if newChildNodeLeaf, ok := newChildNode.(*LeafNode); ok { + isLeaf = true + + newChildrenLastLeafNodeKey = newChildNodeKey + newChildrenLastLeaf = newChildNodeLeaf + } + + internalNode.Children[nibble] = &Child{ + newChildNodeKey.Version, + digestNode(newChildNode), + isLeaf, + } + } + } + + if !anyChildChanged { + // noop + return rootNodeKey, rootNode, nil + } + + newRootNodeKey := NodeKey{ + version, + rootNodeKey.NibblePath, + } + + // if no children anymore, delete this node + + if newChildrenNonNilCount == 0 { + err := replaceRootNode(newRootNodeKey, nil) + if err != nil { + return NodeKey{}, nil, err + } + return NodeKey{}, nil, nil + } + + // if only child is a leaf, make the root node that leaf, while keeping the same nibble path + + if newChildrenNonNilCount == 1 && newChildrenLastLeaf != nil { + // the leaf is already written more deeply, so we need to delete it + + if newChildrenLastLeafNodeKey.Version == version { + // we can remove it in place + err := nodeReadWriter.WriteNode(newChildrenLastLeafNodeKey, nil) + if err != nil { + return NodeKey{}, nil, err + } + } else { + // 
it's an older node so we mark it as stale + err := staleNodeWriter.WriteStaleNode(StaleNode{version, newChildrenLastLeafNodeKey}) + if err != nil { + return NodeKey{}, nil, err + } + } + + err := replaceRootNode(newRootNodeKey, newChildrenLastLeaf) + if err != nil { + return NodeKey{}, nil, err + } + return newRootNodeKey, newChildrenLastLeaf, nil + } + + err := replaceRootNode(newRootNodeKey, internalNode) + if err != nil { + return NodeKey{}, nil, err + } + return newRootNodeKey, internalNode, nil + } + + panic("unreachable") +} + +// ProveSubrange returns the bounding leafs required to verify inclusion of the +// vector of all the key-values of which the digests fall in the subrange +// [startIndex, endInclIndex], using VerifySubrange. +func ProveSubrange( + rootReader RootReader, + nodeReader NodeReader, + version Version, + startIndex Digest, + endInclIndex Digest, +) ([]BoundingLeaf, error) { + + var boundingLeaves []BoundingLeaf + + left, _, err := ReadRangeAscOrDesc(rootReader, nodeReader, version, MinDigest, startIndex, math.MaxInt, 1, ReadRangeOrderDesc) + if err != nil { + return nil, fmt.Errorf("finding left bounding leaf for %x failed: %w", startIndex, err) + } + if len(left) > 0 { + leftLeafKeyDigest := DigestKey(left[0].Key) + leftLeafProof, err := proveInclusionDigested(rootReader, nodeReader, version, leftLeafKeyDigest) + if err != nil { + return nil, fmt.Errorf("proving inclusion of left bounding leaf %x failed: %w", leftLeafKeyDigest, err) + } + + var leftSiblings []Digest + for i, p := range leftLeafProof { + if bitGet(leftLeafKeyDigest, len(leftLeafProof)-1-i) { + // only keep left proof siblings + leftSiblings = append(leftSiblings, p) + } + } + boundingLeaves = append(boundingLeaves, BoundingLeaf{ + LeafKeyAndValueDigests{ + leftLeafKeyDigest, + DigestValue(left[0].Value), + }, + leftSiblings, + }) + } + + right, _, err := ReadRangeAscOrDesc(rootReader, nodeReader, version, endInclIndex, MaxDigest, math.MaxInt, 1, ReadRangeOrderAsc) + if err 
!= nil { + return nil, fmt.Errorf("finding right bounding leaf for %x failed: %w", endInclIndex, err) + } + if len(right) > 0 { + rightLeafKeyDigest := DigestKey(right[0].Key) + rightLeafProof, err := proveInclusionDigested(rootReader, nodeReader, version, rightLeafKeyDigest) + if err != nil { + return nil, fmt.Errorf("proving inclusion of right bounding leaf %x failed: %w", rightLeafKeyDigest, err) + } + + var rightSiblings []Digest + for i, p := range rightLeafProof { + if !bitGet(rightLeafKeyDigest, len(rightLeafProof)-1-i) { + // only keep right proof siblings + rightSiblings = append(rightSiblings, p) + } + } + boundingLeaves = append(boundingLeaves, BoundingLeaf{ + LeafKeyAndValueDigests{ + rightLeafKeyDigest, + DigestValue(right[0].Value), + }, + rightSiblings, + }) + } + + return boundingLeaves, nil +} + +type LeafKeyAndValueDigests struct { + KeyDigest Digest + ValueDigest Digest +} + +// ProveInclusion returns the proof siblings for the inclusion proof of the +// undigested key in the tree at version. 
+func ProveInclusion( + rootReader RootReader, + nodeReader NodeReader, + version Version, + key []byte, +) ([]Digest, error) { + return proveInclusionDigested(rootReader, nodeReader, version, DigestKey(key)) +} + +func proveInclusionDigested( + rootReader RootReader, + nodeReader NodeReader, + version Version, + digestedKey Digest, +) ([]Digest, error) { + var siblings []Digest + + rootNodeKey, err := rootReader.ReadRoot(version) + if err != nil { + return nil, fmt.Errorf("error reading root node with version %d: %w", version, err) + } + + rootNode, err := nodeReader.ReadNode(rootNodeKey) + if err != nil { + return nil, fmt.Errorf("error reading root node with node key %v: %w", rootNodeKey, err) + } + + if rootNode == nil { + return nil, fmt.Errorf("asked to inclusion prove in nil tree") + } + + nibblePath := NibblePathFromDigest(digestedKey) + + for i := 0; i < nibblePath.NumNibbles() && rootNode != nil; i++ { + nibble := nibblePath.Get(i) + switch n := rootNode.(type) { + case *InternalNode: + child := n.Children[nibble] + if child == nil { + return nil, fmt.Errorf("child node with nibble %v not found", nibble) + } + + siblings = append(proveInternalNodeChild(n, int(nibble)), siblings...) + + rootNodeKey = NodeKey{ + child.Version, + rootNodeKey.NibblePath.Append(nibble), + } + rootNode, err = nodeReader.ReadNode(rootNodeKey) + if err != nil { + return nil, fmt.Errorf("error reading child node with node key %v: %w", rootNodeKey, err) + } + case *LeafNode: + if n.KeyDigest == digestedKey { + break + } + return nil, fmt.Errorf("leaf node with key digest %x not found", digestedKey) + } + } + + return siblings, nil +} + +// Read returns the undigested value for the undigested key in the tree at +// version. If the key does not exist, nil is returned, and no error. 
+func Read( + rootReader RootReader, + nodeReader NodeReader, + version Version, + key []byte, +) ([]byte, error) { + rootNodeKey, err := rootReader.ReadRoot(version) + if err != nil { + return nil, fmt.Errorf("error reading root node with version %d: %w", version, err) + } + + rootNode, err := nodeReader.ReadNode(rootNodeKey) + if err != nil { + return nil, fmt.Errorf("error reading root node with node key %v: %w", rootNodeKey, err) + } + + if rootNode == nil { + return nil, nil + } + + nibblePath := NibblePathFromDigest(DigestKey(key)) + + for i := 0; i < nibblePath.NumNibbles(); i++ { + nibble := nibblePath.Get(i) + + if n, ok := rootNode.(*InternalNode); ok { + child := n.Children[nibble] + if child == nil { + return nil, nil + } + childNodeKey := NodeKey{ + child.Version, + rootNodeKey.NibblePath.Append(nibble), + } + childNode, err := nodeReader.ReadNode(childNodeKey) + if err != nil { + return nil, fmt.Errorf("error reading child node with node key %v: %w", childNodeKey, err) + } + if childNode == nil { + return nil, fmt.Errorf("child node with node key %v unexpectedly nil", childNodeKey) + } + rootNode = childNode + rootNodeKey = childNodeKey + } else { + break + } + } + + if n, ok := rootNode.(*LeafNode); ok { + if n.KeyDigest == DigestKey(key) { + return n.Value, nil + } else { + return nil, nil + } + } + panic("unreachable") +} + +// ReadRootDigest returns the digest of the root node of the tree at version. If +// digest is SparseMerklePlaceholderDigest, the tree is empty. The caller must +// be careful to only call this method for versions that exist and roots have +// indeed been written. Versions that are not written are implied to represent +// an empty tree. 
+func ReadRootDigest( + rootReader RootReader, + nodeReader NodeReader, + version Version, +) (Digest, error) { + rootNodeKey, err := rootReader.ReadRoot(version) + if err != nil { + return Digest{}, fmt.Errorf("error reading root node for version %v: %w", version, err) + } + + rootNode, err := nodeReader.ReadNode(rootNodeKey) + if err != nil { + return Digest{}, fmt.Errorf("error reading node for version %v and node key %v: %w", version, rootNodeKey, err) + } + + return digestNode(rootNode), nil +} + +// ReadRange returns all key-value pairs in the range [minKeyDigest, +// maxKeyDigest] in the tree at version. This is useful for state +// synchronization where a verifier expects to build the tree from scratch in +// order from the leftmost to the rightmost leaf. We return the key-value pairs +// instead of the digests to give the verifier the most flexibility. If there +// were more key-value pairs in the range that we could not fit in the limits, +// we return truncated=true. +func ReadRange( + rootReader RootReader, + nodeReader NodeReader, + version Version, + minKeyDigest Digest, + maxKeyDigest Digest, + keysPlusValuesBytesLimit int, + lenLimit int, +) (keyValues []KeyValue, truncated bool, err error) { + return ReadRangeAscOrDesc( + rootReader, + nodeReader, + version, + minKeyDigest, + maxKeyDigest, + keysPlusValuesBytesLimit, + lenLimit, + ReadRangeOrderAsc, + ) +} + +type ReadRangeOrder int + +const ( + _ ReadRangeOrder = iota + ReadRangeOrderAsc + ReadRangeOrderDesc +) + +// ReadRangeAscOrDesc is a variant of ReadRange that allows the caller to +// specify the order in which to read the key-value pairs. 
+func ReadRangeAscOrDesc( + rootReader RootReader, + nodeReader NodeReader, + version Version, + minKeyDigest Digest, + maxKeyDigest Digest, + keysPlusValuesBytesLimit int, + lenLimit int, + order ReadRangeOrder, +) (keyValues []KeyValue, truncated bool, err error) { + rootNodeKey, err := rootReader.ReadRoot(version) + if err != nil { + return nil, false, fmt.Errorf("error reading root node for version %v: %w", version, err) + } + + rootNode, err := nodeReader.ReadNode(rootNodeKey) + if err != nil { + return nil, false, fmt.Errorf("error reading node for node key %v: %w", rootNodeKey, err) + } + keyValues, _, truncated, err = readRange( + nodeReader, + rootNodeKey, + rootNode, + 0, + NibblePathFromDigest(minKeyDigest), + NibblePathFromDigest(maxKeyDigest), + minKeyDigest, + maxKeyDigest, + keysPlusValuesBytesLimit, + lenLimit, + order, + ) + return keyValues, truncated, err +} + +func readRange( + nodeReader NodeReader, + rootNodeKey NodeKey, + rootNode Node, + depth int, + minKeyDigestNibblePath NibblePath, + maxKeyDigestNibblePath NibblePath, + minKeyDigest Digest, + maxKeyDigest Digest, + keysPlusValuesBytesLimit int, + lenLimit int, + order ReadRangeOrder, +) (keyValues []KeyValue, keysPlusValuesBytes int, truncated bool, err error) { + if rootNode == nil { + return nil, 0, false, nil + } + + switch n := rootNode.(type) { + case *InternalNode: + var ( + initial int + delta int + ) + + switch order { + case ReadRangeOrderAsc: + initial, delta = 0, +1 + case ReadRangeOrderDesc: + initial, delta = len(n.Children)-1, -1 + } + for i := initial; i >= 0 && i < len(n.Children); i += delta { + child := n.Children[i] + if child == nil { + continue + } + childNibblePath := rootNodeKey.NibblePath.Append(byte(i)) + { + minKeyDigestNibblePathPrefix := minKeyDigestNibblePath.Prefix(depth + 1) + maxKeyDigestNibblePathPrefix := maxKeyDigestNibblePath.Prefix(depth + 1) + if !(childNibblePath.Compare(minKeyDigestNibblePathPrefix) >= 0 && 
childNibblePath.Compare(maxKeyDigestNibblePathPrefix) <= 0) { + continue + } + } + childNodeKey := NodeKey{ + child.Version, + childNibblePath, + } + childNode, err := nodeReader.ReadNode(childNodeKey) + if err != nil { + return nil, 0, false, fmt.Errorf("error reading child node with node key %v: %w", childNodeKey, err) + } + if childNode == nil { + return nil, 0, false, fmt.Errorf("child node with node key %v unexpectedly nil", childNodeKey) + } + moreKeyValues, moreKeysPlusValuesBytes, moreTruncated, err := readRange( + nodeReader, + childNodeKey, + childNode, + depth+1, + minKeyDigestNibblePath, + maxKeyDigestNibblePath, + minKeyDigest, + maxKeyDigest, + keysPlusValuesBytesLimit-keysPlusValuesBytes, + lenLimit-len(keyValues), + order, + ) + if err != nil { + return nil, 0, false, err + } + keyValues = append(keyValues, moreKeyValues...) + keysPlusValuesBytes += moreKeysPlusValuesBytes + if moreTruncated { + truncated = true + break + } + } + case *LeafNode: + if bytes.Compare(n.KeyDigest[:], minKeyDigest[:]) >= 0 && bytes.Compare(n.KeyDigest[:], maxKeyDigest[:]) <= 0 { + lenLimitOk := len(keyValues)+1 <= lenLimit + keysPlusValuesBytesLimitOk := keysPlusValuesBytes+len(n.Key)+len(n.Value) <= keysPlusValuesBytesLimit + if !(lenLimitOk && keysPlusValuesBytesLimitOk) { + truncated = true + } else { + keysPlusValuesBytes += len(n.Key) + len(n.Value) + keyValues = append(keyValues, KeyValue{n.Key, n.Value}) + } + } + } + + return keyValues, keysPlusValuesBytes, truncated, nil +} diff --git a/internal/jmt/nibbles.go b/internal/jmt/nibbles.go new file mode 100644 index 00000000..6be086bd --- /dev/null +++ b/internal/jmt/nibbles.go @@ -0,0 +1,101 @@ +package jmt + +import ( + "bytes" + "cmp" + "fmt" + "slices" +) + +type NibblePath struct { + numNibbles int + bytes []byte +} + +func NewNibblePath(numNibbles int, bytes []byte) (NibblePath, bool) { + expectedBytesLen := (numNibbles + 1) / 2 + if len(bytes) != expectedBytesLen { + return NibblePath{}, false + } + if 
numNibbles%2 == 1 && bytes[len(bytes)-1]&0x0f != 0 { + return NibblePath{}, false + } + return NibblePath{numNibbles, slices.Clone(bytes)}, true +} + +func (np NibblePath) Compare(np2 NibblePath) int { + + bytesCmp := bytes.Compare(np.bytes, np2.bytes) + if bytesCmp != 0 { + return bytesCmp + } + return cmp.Compare(np.numNibbles, np2.numNibbles) +} + +func (np NibblePath) Equal(np2 NibblePath) bool { + return np.numNibbles == np2.numNibbles && bytes.Equal(np.bytes, np2.bytes) +} + +func (np NibblePath) Get(index int) byte { + if index >= np.numNibbles { + + panic("index out of bounds") + } + b := np.bytes[index/2] + var n byte + if index%2 == 0 { + n = (b & 0xf0) >> 4 + } else { + n = b & 0x0f + } + return n +} + +func (np NibblePath) NumNibbles() int { + return np.numNibbles +} + +func (np NibblePath) Bytes() []byte { + return np.bytes +} + +func (np NibblePath) Append(nibble byte) NibblePath { + np2 := NibblePath{ + np.numNibbles + 1, + bytes.Clone(np.bytes), + } + if np.numNibbles%2 == 1 { + np2.bytes[len(np2.bytes)-1] = np2.bytes[len(np2.bytes)-1]&0xf0 | nibble + return np2 + } else { + np2.bytes = append(np2.bytes, (nibble&0x0f)<<4) + return np2 + } +} + +func (np NibblePath) Prefix(count int) NibblePath { + nbytes := (count + 1) / 2 + keepbytes := bytes.Clone(np.bytes[:nbytes]) + if count%2 == 1 { + keepbytes[len(keepbytes)-1] = keepbytes[len(keepbytes)-1] & 0xf0 + } + return NibblePath{ + count, + keepbytes, + } +} + +func (np NibblePath) String() string { + r := fmt.Sprintf("%x", np.bytes) + if np.numNibbles%2 == 1 { + r = r[:len(r)-1] + } + return r +} + +func NibblePathFromDigest(digest Digest) NibblePath { + return NibblePath{ + len(digest) * 2, + digest[:], + } +} diff --git a/internal/jmt/persistence.go b/internal/jmt/persistence.go new file mode 100644 index 00000000..cc507447 --- /dev/null +++ b/internal/jmt/persistence.go @@ -0,0 +1,44 @@ +package jmt + +// When starting from scratch, you can safely use version 0 as the "old version" +// contains an 
empty tree. +type Version = uint64 + +type RootWriter interface { + WriteRoot(version Version, nodeKey NodeKey) error +} + +type RootReader interface { + ReadRoot(version Version) (nodeKey NodeKey, err error) +} + +type RootReadWriter interface { + RootReader + RootWriter +} + +type NodeWriter interface { + // Implementers should check whether node is nil first. nil indicates an + // empty tree, and should effectively cause a deletion of the node key. + // Implementers should make sure to not be affected by later mutations to + // Node. + WriteNode(nodeKey NodeKey, nodeOrNil Node) error +} + +type NodeReader interface { + ReadNode(nodeKey NodeKey) (nodeOrNil Node, err error) +} + +type NodeReadWriter interface { + NodeReader + NodeWriter +} + +type StaleNode struct { + StaleSinceVersion Version + NodeKey NodeKey +} + +type StaleNodeWriter interface { + WriteStaleNode(staleNode StaleNode) error +} diff --git a/internal/jmt/verify.go b/internal/jmt/verify.go new file mode 100644 index 00000000..e0fa7065 --- /dev/null +++ b/internal/jmt/verify.go @@ -0,0 +1,320 @@ +package jmt + +import ( + "bytes" + "fmt" + "slices" +) + +func bitLength(d Digest) int { + return len(d) * 8 +} + +func bitGet(d Digest, index int) bool { + return (d[index/8] & (1 << (7 - index%8))) != 0 +} + +func commonPrefixLengthInBits(a Digest, b Digest) int { + lastMatchIndex := -1 + for i := 0; i < bitLength(a) && i < bitLength(b); i++ { + if bitGet(a, i) == bitGet(b, i) { + lastMatchIndex = i + } else { + break + } + } + return lastMatchIndex + 1 +} + +type indexedSparseMerkleNode struct { + index Digest + sparseMerkleNode sparseMerkleNode +} + +type BoundingLeaf struct { + Leaf LeafKeyAndValueDigests + Siblings []Digest +} + +const MaxBoundingLeaves = 2 + +// VerifySubrange verifies that the key-value pairs in keyValues are the only +// ones included in the key digest subrange [startIndex, endInclIndex], in the +// tree rooted at expectedRootDigest. 
+func VerifySubrange( + expectedRootDigest Digest, + startIndex Digest, + endInclIndex Digest, + keyValues []KeyValue, + boundingLeaves []BoundingLeaf, +) error { + if !(bytes.Compare(startIndex[:], endInclIndex[:]) <= 0) { + return fmt.Errorf("start index %x is not less than or equal to the end incl index %x", startIndex, endInclIndex) + } + + sortedDigestedKeyValues := make([]digestedKeyValue, 0, len(keyValues)) + for _, kv := range keyValues { + sortedDigestedKeyValues = append(sortedDigestedKeyValues, digestedKeyValue{ + kv.Key, + DigestKey(kv.Key), + kv.Value, + DigestValue(kv.Value), + }) + } + slices.SortFunc(sortedDigestedKeyValues, func(a, b digestedKeyValue) int { + return bytes.Compare(a.keyDigest[:], b.keyDigest[:]) + }) + for i := range sortedDigestedKeyValues { + if i > 0 && !(bytes.Compare(sortedDigestedKeyValues[i-1].keyDigest[:], sortedDigestedKeyValues[i].keyDigest[:]) < 0) { + return fmt.Errorf("key values contain duplicates") + } + } + + if len(sortedDigestedKeyValues) > 0 { + // Ensure key digests are within bounds of the subrange. + if !(bytes.Compare(startIndex[:], sortedDigestedKeyValues[0].keyDigest[:]) <= 0) { + return fmt.Errorf("start index %x is not less than or equal to the min key digest %x", startIndex, sortedDigestedKeyValues[0].keyDigest) + } + if !(bytes.Compare(sortedDigestedKeyValues[len(sortedDigestedKeyValues)-1].keyDigest[:], endInclIndex[:]) <= 0) { + return fmt.Errorf("end incl index %x is not greater than or equal to the max key digest %x", endInclIndex, sortedDigestedKeyValues[len(sortedDigestedKeyValues)-1].keyDigest) + } + } + + // This layer is generated by the key-value pairs we received, padded with + // the bounding leaves on the left and right. 
+ bottommostLayer := make([]indexedSparseMerkleNode, 0, len(sortedDigestedKeyValues)+2) + + var ( + leftBoundingLeafOrNil *BoundingLeaf + rightBoundingLeafOrNil *BoundingLeaf + ) + + switch len(boundingLeaves) { + case 0: + case 1: + if bytes.Compare(boundingLeaves[0].Leaf.KeyDigest[:], startIndex[:]) <= 0 { + leftBoundingLeafOrNil = &boundingLeaves[0] + } else if bytes.Compare(endInclIndex[:], boundingLeaves[0].Leaf.KeyDigest[:]) <= 0 { + rightBoundingLeafOrNil = &boundingLeaves[0] + } else { + return fmt.Errorf("bounding leaf key digest %x is not less than or equal to the start index %x or greater than or equal to the end incl index %x", boundingLeaves[0].Leaf.KeyDigest, startIndex, endInclIndex) + } + case 2: + leftBoundingLeafOrNil = &boundingLeaves[0] + rightBoundingLeafOrNil = &boundingLeaves[1] + default: + return fmt.Errorf("unexpected number of bounding leaves: %v", len(boundingLeaves)) + } + + if leftBoundingLeafOrNil != nil { + leftBoundingIndex := leftBoundingLeafOrNil.Leaf.KeyDigest + leftBoundingCmpStart := bytes.Compare(leftBoundingIndex[:], startIndex[:]) + switch leftBoundingCmpStart { + case -1: + // Left bounding leaf is completely to the left of the range, we can safely add it to the bottommost layer + bottommostLayer = append(bottommostLayer, indexedSparseMerkleNode{ + leftBoundingIndex, + sparseMerkleNode{ + true, + digestLeafBinary(leftBoundingIndex, leftBoundingLeafOrNil.Leaf.ValueDigest), + }, + }) + case 0: + // If the left bounding leaf is in range but not included in keyvalues, error out + if len(sortedDigestedKeyValues) == 0 { + return fmt.Errorf("left bounding leaf key digest %x is in range but not included in keyvalues", leftBoundingLeafOrNil.Leaf.KeyDigest) + } + if sortedDigestedKeyValues[0].keyDigest != leftBoundingIndex { + return fmt.Errorf("left bounding leaf key digest %x is in range but not in keyvalues: bounding key digest %v, keyvalues first key digest %v", leftBoundingLeafOrNil.Leaf.KeyDigest, 
leftBoundingLeafOrNil.Leaf.KeyDigest, sortedDigestedKeyValues[0].keyDigest) + } + if sortedDigestedKeyValues[0].valueDigest != leftBoundingLeafOrNil.Leaf.ValueDigest { + return fmt.Errorf("left bounding leaf key digest %x is in range but improperly included in keyvalues: bounding value digest %v, keyvalues value digest %v", leftBoundingLeafOrNil.Leaf.KeyDigest, leftBoundingLeafOrNil.Leaf.ValueDigest, sortedDigestedKeyValues[0].valueDigest) + } + case 1: + return fmt.Errorf("left bounding leaf key digest %x is greater than the start index %x", leftBoundingLeafOrNil.Leaf.KeyDigest, startIndex) + } + } + + for _, kv := range sortedDigestedKeyValues { + leafDigest := digestLeafBinary(kv.keyDigest, kv.valueDigest) + bottommostLayer = append(bottommostLayer, indexedSparseMerkleNode{ + kv.keyDigest, + sparseMerkleNode{ + true, + leafDigest, + }, + }) + } + + if rightBoundingLeafOrNil != nil { + rightBoundingIndex := rightBoundingLeafOrNil.Leaf.KeyDigest + rightBoundingCmpEndIncl := bytes.Compare(rightBoundingIndex[:], endInclIndex[:]) + switch rightBoundingCmpEndIncl { + case 1: + // Right bounding leaf comes after end of range, we can safely add it to the bottommost layer + bottommostLayer = append(bottommostLayer, indexedSparseMerkleNode{ + rightBoundingIndex, + sparseMerkleNode{ + true, + digestLeafBinary(rightBoundingIndex, rightBoundingLeafOrNil.Leaf.ValueDigest), + }, + }) + case 0: + // If the right bounding leaf is in range but not included in keyvalues, error out + if len(sortedDigestedKeyValues) == 0 { + return fmt.Errorf("right bounding leaf key digest %x is in range but not included in keyvalues", rightBoundingLeafOrNil.Leaf.KeyDigest) + } + if sortedDigestedKeyValues[len(sortedDigestedKeyValues)-1].keyDigest != rightBoundingIndex { + return fmt.Errorf("right bounding leaf key digest %x is in range but not in keyvalues: bounding key digest %v, keyvalues last key digest %v", rightBoundingLeafOrNil.Leaf.KeyDigest, rightBoundingLeafOrNil.Leaf.KeyDigest, 
sortedDigestedKeyValues[len(sortedDigestedKeyValues)-1].keyDigest) + } + if sortedDigestedKeyValues[len(sortedDigestedKeyValues)-1].valueDigest != rightBoundingLeafOrNil.Leaf.ValueDigest { + return fmt.Errorf("right bounding leaf key digest %x is in range but improperly included in keyvalues: bounding value digest %v, keyvalues value digest %v", rightBoundingLeafOrNil.Leaf.KeyDigest, rightBoundingLeafOrNil.Leaf.ValueDigest, sortedDigestedKeyValues[len(sortedDigestedKeyValues)-1].valueDigest) + } + case -1: + // Right bounding leaf comes before end of range + return fmt.Errorf("right bounding leaf key digest %x is less than the end incl index %x", rightBoundingLeafOrNil.Leaf.KeyDigest, endInclIndex) + } + } + + if len(bottommostLayer) == 0 { + // Prover is claiming the tree is empty, and we know there are no + // bounding leaves or siblings of them otherwise the bottommost layer + // would not be empty. So we can safely add a placeholder node and have + // it bubble to the top. + bottommostLayer = append(bottommostLayer, indexedSparseMerkleNode{ + startIndex, + sparseMerkleNode{ + false, + SparseMerklePlaceholderDigest, + }, + }) + } + + return verifySubrangePadded( + expectedRootDigest, + bottommostLayer, + leftBoundingLeafOrNil, + rightBoundingLeafOrNil, + ) +} + +func siblingDigests( + left Digest, + right Digest, + depth int, +) bool { + return commonPrefixLengthInBits(left, right) == depth && !bitGet(left, depth) && bitGet(right, depth) +} + +func cumulativeOccurrencesOfBit(d Digest, b bool) []int { + acc := make([]int, bitLength(d)) + for i := 0; i < bitLength(d); i++ { + if i > 0 { + acc[i] = acc[i-1] + } + if bitGet(d, i) == b { + acc[i]++ + } + } + return acc +} + +func verifySubrangePadded( + expectedRootDigest Digest, + bottommostLayer []indexedSparseMerkleNode, + leftBoundingLeafOrNil *BoundingLeaf, + rightBoundingLeafOrNil *BoundingLeaf, +) error { + expectedLeftSiblingsAtDepth := make([]int, bitLength(Digest{})) + expectedRightSiblingsAtDepth := 
make([]int, bitLength(Digest{})) + if leftBoundingLeafOrNil != nil { + expectedLeftSiblingsAtDepth = cumulativeOccurrencesOfBit(leftBoundingLeafOrNil.Leaf.KeyDigest, true) + } + if rightBoundingLeafOrNil != nil { + expectedRightSiblingsAtDepth = cumulativeOccurrencesOfBit(rightBoundingLeafOrNil.Leaf.KeyDigest, false) + } + + layer := bottommostLayer + maxDepth := bitLength(Digest{}) - 1 + for depth := maxDepth; depth >= 0; depth-- { + parentLayer := make([]indexedSparseMerkleNode, 0, len(layer)/2) + + for i := 0; i < len(layer); i++ { + index := layer[i].index + node := layer[i].sparseMerkleNode + bit := bitGet(index, depth) + + var parentNode sparseMerkleNode + switch bit { + case false: + // We are looking for a right sibling... + + var rightSibling sparseMerkleNode + + // Is this the rightmost node in layer? + if i+1 == len(layer) { + if rightBoundingLeafOrNil != nil && len(rightBoundingLeafOrNil.Siblings) > 0 && expectedRightSiblingsAtDepth[depth] == len(rightBoundingLeafOrNil.Siblings) { + // Either our right siblings cover this level. + rightSibling = sparseMerkleNode{false, rightBoundingLeafOrNil.Siblings[0]} + rightBoundingLeafOrNil.Siblings = rightBoundingLeafOrNil.Siblings[1:] + } else { + // Or we put a placeholder node. + rightSibling = sparseMerkleNode{false, SparseMerklePlaceholderDigest} + } + } else if siblingDigests(index, layer[i+1].index, depth) { + // The immediately next node in layer is a right sibling! + rightSibling = layer[i+1].sparseMerkleNode + i++ // Skip over the right sibling we just used. + } else { + // We put a placeholder node. + rightSibling = sparseMerkleNode{false, SparseMerklePlaceholderDigest} + } + + parentNode = sparseDigestInternalBinary(node, rightSibling) + case true: + // We are looking for a left sibling... + + var leftSibling sparseMerkleNode + + // Is this the leftmost node in layer? 
+ if i == 0 { + if leftBoundingLeafOrNil != nil && len(leftBoundingLeafOrNil.Siblings) > 0 && expectedLeftSiblingsAtDepth[depth] == len(leftBoundingLeafOrNil.Siblings) { + // Either our left siblings cover this level. + leftSibling = sparseMerkleNode{false, leftBoundingLeafOrNil.Siblings[0]} + leftBoundingLeafOrNil.Siblings = leftBoundingLeafOrNil.Siblings[1:] + } else { + // Or we put a placeholder node. + leftSibling = sparseMerkleNode{false, SparseMerklePlaceholderDigest} + } + } else { + // If there was a left sibling in layer we'd already have + // processed this node as its right sibling, and skipped + // over it. Thus there's no appropriate left sibling in + // layer. + leftSibling = sparseMerkleNode{false, SparseMerklePlaceholderDigest} + } + + parentNode = sparseDigestInternalBinary(leftSibling, node) + } + + parentLayer = append(parentLayer, indexedSparseMerkleNode{ + // We are being lazy and not truncating the index to the depth + // of the parent node. + index, + parentNode, + }) + } + layer = parentLayer + } + + if len(layer) != 1 { + return fmt.Errorf("unexpectedly ended with a non-singleton layer of %v nodes at the top", len(layer)) + } + if !((leftBoundingLeafOrNil == nil || len(leftBoundingLeafOrNil.Siblings) == 0) && (rightBoundingLeafOrNil == nil || len(rightBoundingLeafOrNil.Siblings) == 0)) { + return fmt.Errorf("unexpectedly ended with left or right siblings remaining: left %v, right %v", len(leftBoundingLeafOrNil.Siblings), len(rightBoundingLeafOrNil.Siblings)) + } + if layer[0].sparseMerkleNode.digest != expectedRootDigest { + return fmt.Errorf("computed root digest mismatch: computed %x, expected %x", layer[0].sparseMerkleNode.digest, expectedRootDigest) + } + return nil +} diff --git a/internal/loghelper/taper.go b/internal/loghelper/taper.go index c58aa0ef..6d86da0a 100644 --- a/internal/loghelper/taper.go +++ b/internal/loghelper/taper.go @@ -1,31 +1,41 @@ package loghelper +import "sync" + // LogarithmicTaper provides logarithmic 
tapering of an event sequence. // For example, if the taper is Triggered 50 times with a function that // simply prints the provided count, the output would be 1,2,4,8,16,32. type LogarithmicTaper struct { - count uint64 + count uint64 + countMu sync.Mutex } // Trigger increments a count and calls f iff the new count is a power of two func (tap *LogarithmicTaper) Trigger(f func(newCount uint64)) { + tap.countMu.Lock() tap.count++ - if f != nil && isPowerOfTwo(tap.count) { - f(tap.count) + newCount := tap.count + tap.countMu.Unlock() + if f != nil && isPowerOfTwo(newCount) { + f(newCount) } } // Count returns the internal count of the taper func (tap *LogarithmicTaper) Count() uint64 { + tap.countMu.Lock() + defer tap.countMu.Unlock() return tap.count } // Reset resets the count to 0 and then calls f with the previous count // iff it wasn't already 0 func (tap *LogarithmicTaper) Reset(f func(oldCount uint64)) { - if tap.count != 0 { - oldCount := tap.count - tap.count = 0 + tap.countMu.Lock() + oldCount := tap.count + tap.count = 0 + tap.countMu.Unlock() + if oldCount != 0 { f(oldCount) } } diff --git a/internal/randmap/randmap.go b/internal/randmap/randmap.go new file mode 100644 index 00000000..8591c582 --- /dev/null +++ b/internal/randmap/randmap.go @@ -0,0 +1,74 @@ +package randmap + +import "math/rand/v2" + +type MapEntry[K comparable, V any] struct { + Key K + Value V +} + +// Like a regular map, but with a GetRandom operation +// that returns a uniformly random entry from the map. +// +// Limitation: The memory consumed by this data structure is evergrowing, it never shrinks. 
+ +type Map[K comparable, V any] struct { + indices map[K]int + entries []MapEntry[K, V] +} + +func NewMap[K comparable, V any]() *Map[K, V] { + return &Map[K, V]{ + make(map[K]int), + make([]MapEntry[K, V], 0), + } +} + +func (m *Map[K, V]) Set(key K, value V) { + if _, ok := m.indices[key]; !ok { + m.indices[key] = len(m.entries) + m.entries = append(m.entries, MapEntry[K, V]{key, value}) + } else { + m.entries[m.indices[key]] = MapEntry[K, V]{key, value} + } +} + +func (m *Map[K, V]) Get(key K) (V, bool) { + if _, ok := m.indices[key]; !ok { + var zero V + return zero, false + } + return m.entries[m.indices[key]].Value, true +} + +func (m *Map[K, V]) Delete(key K) { + if _, ok := m.indices[key]; !ok { + return + } + + index := m.indices[key] + delete(m.indices, key) + + if index == len(m.entries)-1 { + // the element we want to delete is already in last place + m.entries = m.entries[0 : len(m.entries)-1] + return + } else { + m.entries[index] = m.entries[len(m.entries)-1] + m.indices[m.entries[index].Key] = index + m.entries = m.entries[0 : len(m.entries)-1] + } +} + +func (m *Map[K, V]) GetRandom() (MapEntry[K, V], bool) { + if len(m.entries) == 0 { + var zero MapEntry[K, V] + return zero, false + } + randomEntry := m.entries[rand.IntN(len(m.entries))] + return randomEntry, true +} + +func (m *Map[K, V]) Size() int { + return len(m.entries) +} diff --git a/internal/ringbuffer/ringbuffer.go b/internal/ringbuffer/ringbuffer.go index b19337ac..48a02b70 100644 --- a/internal/ringbuffer/ringbuffer.go +++ b/internal/ringbuffer/ringbuffer.go @@ -91,3 +91,17 @@ func (rb *RingBuffer[T]) PushEvict(item T) (evicted T, didEvict bool) { } return evicted, didEvict } + +func (rb *RingBuffer[T]) SetCap(cap int) { + + // KISS ! 
+ temp := NewRingBuffer[T](cap) // will panic if cap <= 0 + for { + item, ok := rb.Pop() + if !ok { + break + } + temp.PushEvict(item) + } + *rb = *temp +} diff --git a/internal/singlewriter/conflict_tracker.go b/internal/singlewriter/conflict_tracker.go new file mode 100644 index 00000000..9a1dba5f --- /dev/null +++ b/internal/singlewriter/conflict_tracker.go @@ -0,0 +1,43 @@ +package singlewriter + +import ( + "fmt" + "sync" +) + +type ConflictTracker struct { + mu sync.Mutex + maxCreatedTxTimestamp uint64 + maxCommittedTxTimestamp uint64 +} + +func NewConflictTracker() *ConflictTracker { + return &ConflictTracker{ + sync.Mutex{}, + uint64(0), + uint64(0), + } +} + +func (ct *ConflictTracker) beginTransaction() (uint64, uint64) { + ct.mu.Lock() + defer ct.mu.Unlock() + ct.maxCreatedTxTimestamp++ + return ct.maxCreatedTxTimestamp, ct.maxCommittedTxTimestamp +} + +func (ct *ConflictTracker) lockAndPrepareToCommit(maxCommittedTxTimestampAtCreation uint64) error { + ct.mu.Lock() + if maxCommittedTxTimestampAtCreation != ct.maxCommittedTxTimestamp { + ct.mu.Unlock() + return fmt.Errorf("concurrent conflict detected: expected maxCommittedTxTimestamp: %d, got: %d", maxCommittedTxTimestampAtCreation, ct.maxCommittedTxTimestamp) + } + return nil +} + +func (ct *ConflictTracker) finalizeCommitAndUnlock(success bool, timestamp uint64) { + if success { + ct.maxCommittedTxTimestamp = timestamp + } + ct.mu.Unlock() +} diff --git a/internal/singlewriter/overlay_transaction.go b/internal/singlewriter/overlay_transaction.go new file mode 100644 index 00000000..d32f82eb --- /dev/null +++ b/internal/singlewriter/overlay_transaction.go @@ -0,0 +1,322 @@ +package singlewriter + +import ( + "bytes" + "fmt" + "sort" + "sync" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" +) + +type opKind int + +const ( + _ opKind = iota + opWrite + opDelete +) + +type operation struct { + kind opKind + value []byte // only for writes +} + +var ErrClosed = 
fmt.Errorf("transaction already committed/discarded") + +type txStatus int + +const ( + _ txStatus = iota + txStatusOpen + txStatusCommitted + txStatusDiscarded +) + +type overlayTransaction struct { + overlay map[string]operation + + keyValueDatabase ocr3_1types.KeyValueDatabase + rawReaderTx ocr3_1types.KeyValueDatabaseReadTransaction + + // but using the mu mutex + + mu sync.Mutex + + iterators sync.WaitGroup + status txStatus +} + +func (ot *overlayTransaction) Read(key []byte) ([]byte, error) { + k := string(key) + ot.mu.Lock() + defer ot.mu.Unlock() + if ot.status != txStatusOpen { + return nil, ErrClosed + } + if op, ok := ot.overlay[k]; ok { + switch op.kind { + case opWrite: + v := bytes.Clone(op.value) + return v, nil + case opDelete: + return nil, nil + default: + return nil, fmt.Errorf("unknown op kind: %v", op.kind) + } + } + return ot.rawReaderTx.Read(key) +} + +type keyOperationPair struct { + key []byte + op operation +} + +type overlayIterator struct { + hiKeyExcl []byte + + currentKey []byte + currentValue []byte + err error + done bool + + sortedTouchedLocalSnapshot []keyOperationPair + idx int // index on sortedTouchedLocalSnapshot + + base ocr3_1types.KeyValueDatabaseIterator + baseNext bool + baseKey []byte + + onClose func() +} + +var _ ocr3_1types.KeyValueDatabaseIterator = &overlayIterator{} + +func (oi *overlayIterator) advanceBase() { + if !oi.base.Next() { + oi.baseNext = false + oi.baseKey = nil + return + } + + k := oi.base.Key() + if oi.hiKeyExcl != nil && bytes.Compare(k, oi.hiKeyExcl) >= 0 { + oi.baseNext = false + oi.baseKey = nil + return + } + + oi.baseNext = true + oi.baseKey = bytes.Clone(k) +} + +func (oi *overlayIterator) Next() bool { + if oi.done { + return false + } + + if oi.err != nil { + return false + } + + for { + var pickedTouchedLocal bool + if oi.baseNext && oi.idx < len(oi.sortedTouchedLocalSnapshot) { + // both ranges have more keys, we must compare + pickedTouchedLocal = 
bytes.Compare(oi.sortedTouchedLocalSnapshot[oi.idx].key, oi.baseKey) <= 0 + } else if oi.idx < len(oi.sortedTouchedLocalSnapshot) { + pickedTouchedLocal = true + } else if oi.baseNext { + pickedTouchedLocal = false + } else { + // both ranges are exhausted + oi.done = true + return false + } + + if pickedTouchedLocal { + kop := oi.sortedTouchedLocalSnapshot[oi.idx] + oi.idx++ + // If it was a tie we must advance base to avoid duplicates + if oi.baseNext && bytes.Equal(kop.key, oi.baseKey) { + oi.advanceBase() + } + + if kop.op.kind == opDelete { + continue + } + oi.currentKey = kop.key + oi.currentValue = bytes.Clone(kop.op.value) + return true + } + // else we picked from rawReaderTx.Range() + oi.currentKey = bytes.Clone(oi.baseKey) + v, err := oi.base.Value() + if err != nil { + oi.err = err + oi.done = true + return false + } + oi.currentValue = v + oi.advanceBase() + return true + } +} + +func (oi *overlayIterator) Key() []byte { + return oi.currentKey +} + +func (oi *overlayIterator) Value() ([]byte, error) { + if oi.err != nil { + return nil, oi.err + } + return oi.currentValue, nil +} + +func (oi *overlayIterator) Err() error { + return oi.err +} + +func (oi *overlayIterator) Close() error { + err := oi.base.Close() + if oi.onClose != nil { + oi.onClose() + oi.onClose = nil + } + if err != nil { + return err + } + return nil +} + +type closedIterator struct { + err error +} + +var _ ocr3_1types.KeyValueDatabaseIterator = &closedIterator{} + +func (ti *closedIterator) Next() bool { + return false +} + +func (ti *closedIterator) Key() []byte { + return nil +} + +func (ti *closedIterator) Value() ([]byte, error) { + return nil, ti.err +} + +func (ti *closedIterator) Err() error { + return ti.err +} + +func (ti *closedIterator) Close() error { + return ti.err +} + +// Range iterates over the merged overlay and rawReaderTx keys. +// If a key exists both in the rawReaderTx and the overlay, it returns the overlay value. 
+// If a key is deleted in the overlay it skips it. +// The caller is expected to call Close() on the returned iterator before Committing or Discarding the transaction. +func (ot *overlayTransaction) Range(loKey []byte, hiKeyExcl []byte) ocr3_1types.KeyValueDatabaseIterator { + loKey = bytes.Clone(loKey) + hiKeyExcl = bytes.Clone(hiKeyExcl) + + ot.mu.Lock() + defer ot.mu.Unlock() + if ot.status != txStatusOpen { + return &closedIterator{err: ErrClosed} + } + + kops := make([]keyOperationPair, 0, len(ot.overlay)) + for k, op := range ot.overlay { + kb := []byte(k) + if (loKey == nil || bytes.Compare(kb, loKey) >= 0) && + (hiKeyExcl == nil || bytes.Compare(kb, hiKeyExcl) < 0) { + kops = append(kops, keyOperationPair{bytes.Clone(kb), op}) + } + } + + sort.Slice(kops, + func(i, j int) bool { + return bytes.Compare(kops[i].key, kops[j].key) < 0 + }) + ot.iterators.Add(1) + oi := &overlayIterator{ + hiKeyExcl, + nil, + nil, + nil, + false, + kops, + 0, + ot.rawReaderTx.Range(loKey, hiKeyExcl), + false, + nil, + ot.iterators.Done, + } + oi.advanceBase() + return oi +} + +func (ot *overlayTransaction) lockedDiscard() { + ot.overlay = nil + ot.status = txStatusDiscarded + // wait for any open iterators to close before dropping the read tx + ot.iterators.Wait() + ot.rawReaderTx.Discard() +} + +func (ot *overlayTransaction) Discard() { + ot.mu.Lock() + defer ot.mu.Unlock() + if ot.status != txStatusOpen { + return + } + ot.lockedDiscard() +} + +func (ot *overlayTransaction) Write(key []byte, value []byte) error { + ot.mu.Lock() + defer ot.mu.Unlock() + if ot.status != txStatusOpen { + return ErrClosed + } + ot.overlay[string(key)] = operation{opWrite, bytes.Clone(value)} + return nil +} + +func (ot *overlayTransaction) Delete(key []byte) error { + ot.mu.Lock() + defer ot.mu.Unlock() + if ot.status != txStatusOpen { + return ErrClosed + } + ot.overlay[string(key)] = operation{opDelete, nil} + return nil +} + +func lockedCommit(overlay map[string]operation, rawReadWriteTx 
ocr3_1types.KeyValueDatabaseReadWriteTransaction) error { + for k := range overlay { + op := overlay[k] + switch op.kind { + case opWrite: + if err := rawReadWriteTx.Write([]byte(k), op.value); err != nil { + return err + } + case opDelete: + if err := rawReadWriteTx.Delete([]byte(k)); err != nil { + return err + } + default: + return fmt.Errorf("unknown op kind: %v", op.kind) + } + } + if err := rawReadWriteTx.Commit(); err != nil { + return err + } + return nil +} diff --git a/internal/singlewriter/serialized_transaction.go b/internal/singlewriter/serialized_transaction.go new file mode 100644 index 00000000..b07c1bc4 --- /dev/null +++ b/internal/singlewriter/serialized_transaction.go @@ -0,0 +1,79 @@ +package singlewriter + +import ( + "fmt" + "sync" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" +) + +type SerializedTransaction struct { + *overlayTransaction + timestamp uint64 + maxCommittedTransactionTimestampAtCreation uint64 + + tracker *ConflictTracker +} + +var _ ocr3_1types.KeyValueDatabaseReadWriteTransaction = &SerializedTransaction{} + +func NewSerializedTransaction( + keyValueDatabase ocr3_1types.KeyValueDatabase, + conflictTracker *ConflictTracker, +) (ocr3_1types.KeyValueDatabaseReadWriteTransaction, error) { + timestamp, maxCommittedTransactionTimestamp := conflictTracker.beginTransaction() + rawReaderTx, err := keyValueDatabase.NewReadTransaction() + if err != nil { + return nil, fmt.Errorf("failed to create read transaction to create a SerializedTransaction: %w", err) + } + return &SerializedTransaction{ + &overlayTransaction{ + make(map[string]operation), + keyValueDatabase, + rawReaderTx, + sync.Mutex{}, + sync.WaitGroup{}, + txStatusOpen, + }, + timestamp, + maxCommittedTransactionTimestamp, + conflictTracker, + }, nil +} + +// Commit the overlay to the underlying DB as long as no other SerializedTransaction is committed since this +// SerializedTransaction was created. 
+func (st *SerializedTransaction) Commit() error { + st.mu.Lock() + defer st.mu.Unlock() + + if st.status != txStatusOpen { + return ErrClosed + } + overlay := st.overlay + st.lockedDiscard() + + if err := st.tracker.lockAndPrepareToCommit(st.maxCommittedTransactionTimestampAtCreation); err != nil { + return err + } + committed := false + defer func() { + st.tracker.finalizeCommitAndUnlock(committed, st.timestamp) + }() + + rawReadWriteTx, err := st.keyValueDatabase.NewReadWriteTransaction() + if err != nil { + return fmt.Errorf("failed to create read write transaction to commit SerializedTransaction: %w", err) + } + + defer rawReadWriteTx.Discard() + + err = lockedCommit(overlay, rawReadWriteTx) + if err != nil { + return err + } + + st.status = txStatusCommitted + committed = true + return nil +} diff --git a/internal/singlewriter/unserialized_transaction.go b/internal/singlewriter/unserialized_transaction.go new file mode 100644 index 00000000..c3c2f8ef --- /dev/null +++ b/internal/singlewriter/unserialized_transaction.go @@ -0,0 +1,60 @@ +package singlewriter + +import ( + "fmt" + "sync" + + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" +) + +type UnserializedTransaction struct { + *overlayTransaction +} + +var _ ocr3_1types.KeyValueDatabaseReadWriteTransaction = &UnserializedTransaction{} + +func NewUnserializedTransaction( + keyValueDatabase ocr3_1types.KeyValueDatabase, +) (ocr3_1types.KeyValueDatabaseReadWriteTransaction, error) { + rawReaderTx, err := keyValueDatabase.NewReadTransaction() + if err != nil { + return nil, fmt.Errorf("failed to create read transaction to create an UnserializedTransactionImpl: %w", err) + } + return &UnserializedTransaction{ + &overlayTransaction{ + make(map[string]operation), + keyValueDatabase, + rawReaderTx, + sync.Mutex{}, + sync.WaitGroup{}, + txStatusOpen, + }, + }, nil +} + +func (ut *UnserializedTransaction) Commit() error { + ut.mu.Lock() + defer ut.mu.Unlock() + + if ut.status != 
txStatusOpen { + return ErrClosed + } + overlay := ut.overlay + ut.lockedDiscard() + + rawReadWriteTx, err := ut.keyValueDatabase.NewReadWriteTransaction() + if err != nil { + return fmt.Errorf("failed to create read write transaction to commit UnserializedTransaction: %w", err) + } + + defer rawReadWriteTx.Discard() + + err = lockedCommit(overlay, rawReadWriteTx) + if err != nil { + return err + } + + ut.status = txStatusCommitted + ut.overlay = nil + return nil +} diff --git a/networking/internal/ocrendpointv3/types/types.go b/networking/internal/ocrendpointv3/types/types.go deleted file mode 100644 index 6856c169..00000000 --- a/networking/internal/ocrendpointv3/types/types.go +++ /dev/null @@ -1,31 +0,0 @@ -package types - -import ( - "crypto/rand" - "encoding/hex" - "fmt" - - "github.com/smartcontractkit/libocr/commontypes" - ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" -) - -type StreamID struct { - OracleID commontypes.OracleID - Priority ocr2types.BinaryMessageOutboundPriority -} - -const RequestIDSize = 32 - -type RequestID [RequestIDSize]byte - -var _ fmt.Stringer = RequestID{} - -func (r RequestID) String() string { - return hex.EncodeToString(r[:]) -} - -func GetRandomRequestID() RequestID { - var b [RequestIDSize]byte - _, _ = rand.Read(b[:]) - return RequestID(b) -} diff --git a/networking/ocr_endpoint_v2.go b/networking/ocr_endpoint_v2.go index 5d81a71f..54dbf71e 100644 --- a/networking/ocr_endpoint_v2.go +++ b/networking/ocr_endpoint_v2.go @@ -7,8 +7,8 @@ import ( "sync" "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" - "github.com/smartcontractkit/libocr/ragep2p" ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" "github.com/smartcontractkit/libocr/subprocesses" @@ -50,14 +50,14 @@ type ocrEndpointV2 struct { peerIDs []ragetypes.PeerID peerMapping 
map[commontypes.OracleID]ragetypes.PeerID reversedPeerMapping map[ragetypes.PeerID]commontypes.OracleID - host *ragep2p.Host + host ragep2pwrapper.Host configDigest ocr2types.ConfigDigest ownOracleID commontypes.OracleID // internal and state management chSendToSelf chan commontypes.BinaryMessageWithSender chClose chan struct{} - streams map[commontypes.OracleID]*ragep2p.Stream + streams map[commontypes.OracleID]ragep2pwrapper.Stream registration io.Closer state ocrEndpointState @@ -131,7 +131,7 @@ func newOCREndpointV2( ownOracleID, chSendToSelf, make(chan struct{}), - make(map[commontypes.OracleID]*ragep2p.Stream), + make(map[commontypes.OracleID]ragep2pwrapper.Stream), registration, ocrEndpointUnstarted, sync.RWMutex{}, @@ -174,11 +174,11 @@ func (o *ocrEndpointV2) Start() error { o.config.OutgoingMessageBufferSize, o.config.IncomingMessageBufferSize, o.limits.MaxMessageLength, - ragep2p.TokenBucketParams{ + ragetypes.TokenBucketParams{ o.limits.MessagesRatePerOracle, uint32(o.limits.MessagesCapacityPerOracle), }, - ragep2p.TokenBucketParams{ + ragetypes.TokenBucketParams{ o.limits.BytesRatePerOracle, uint32(o.limits.BytesCapacityPerOracle), }, diff --git a/networking/ocr_endpoint_v3.go b/networking/ocr_endpoint_v3.go index 7abaaa5b..a9cb10ca 100644 --- a/networking/ocr_endpoint_v3.go +++ b/networking/ocr_endpoint_v3.go @@ -1,18 +1,14 @@ package networking import ( - "encoding" "errors" "fmt" "io" - "math" "sync" "github.com/smartcontractkit/libocr/commontypes" - "github.com/smartcontractkit/libocr/networking/internal/ocrendpointv3/responselimit" - ocrendpointv3types "github.com/smartcontractkit/libocr/networking/internal/ocrendpointv3/types" ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" - "github.com/smartcontractkit/libocr/ragep2p" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew" ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" "github.com/smartcontractkit/libocr/subprocesses" @@ -29,7 +25,7 @@ type 
ocrEndpointV3 struct { defaultPriorityConfig ocr2types.BinaryNetworkEndpoint2Config lowPriorityConfig ocr2types.BinaryNetworkEndpoint2Config peerMapping map[commontypes.OracleID]ragetypes.PeerID - host *ragep2p.Host + host *ragep2pnew.Host configDigest ocr2types.ConfigDigest ownOracleID commontypes.OracleID @@ -43,8 +39,6 @@ type ocrEndpointV3 struct { stateMu sync.RWMutex subs subprocesses.Subprocesses - responseChecker *responselimit.ResponseChecker - // recv is exposed to clients of this network endpoint recv chan ocr2types.InboundBinaryMessageWithSender @@ -52,8 +46,8 @@ type ocrEndpointV3 struct { } type priorityStreamGroup struct { - Low *ragep2p.Stream - Default *ragep2p.Stream + Low ragep2pnew.Stream2 + Default ragep2pnew.Stream2 } //nolint:unused @@ -94,11 +88,16 @@ func newOCREndpointV3( logger.Warn("OCREndpointV3: No bootstrappers were provided. Peer discovery might not work reliably for this instance.", nil) } + host, ok := peer.host.RawWrappee().(*ragep2pnew.Host) + if !ok { + return nil, fmt.Errorf("host is not a wrapped ragep2pnew.Host. Please set the appropriate value for PeerConfig.EnableExperimentalRageP2P") + } + o := &ocrEndpointV3{ defaultPriorityConfig, lowPriorityConfig, peerMapping, - peer.host, + host, configDigest, ownOracleID, chSendToSelf, @@ -108,7 +107,6 @@ func newOCREndpointV3( ocrEndpointUnstarted, sync.RWMutex{}, subprocesses.Subprocesses{}, - responselimit.NewResponseChecker(), make(chan ocr2types.InboundBinaryMessageWithSender), logger, } @@ -138,34 +136,45 @@ func (o *ocrEndpointV3) start() error { continue } - noLimitsMaxMessageLength := ragep2p.MaxMessageLength - noLimitsTokenBucketParams := ragep2p.TokenBucketParams{ - math.MaxFloat64, - math.MaxUint32, - } - // Initialize the underlying streams, one stream per priority level. 
- lowPriorityStream, err := o.host.NewStream( + lowPriorityStream, err := o.host.NewStream2( pid, - streamNameFromConfigDigestAndPriority(o.configDigest, ocr2types.BinaryMessagePriorityLow), - o.lowPriorityConfig.OverrideOutgoingMessageBufferSize, - o.lowPriorityConfig.OverrideIncomingMessageBufferSize, - noLimitsMaxMessageLength, - noLimitsTokenBucketParams, - noLimitsTokenBucketParams, + streamNameFromConfigDigestAndPriority(o.configDigest, ragep2pnew.StreamPriorityLow), + ragep2pnew.StreamPriorityLow, + ragep2pnew.Stream2Limits{o.lowPriorityConfig.OverrideOutgoingMessageBufferSize, + o.lowPriorityConfig.OverrideIncomingMessageBufferSize, + o.lowPriorityConfig.MaxMessageLength, + ragetypes.TokenBucketParams{ + o.lowPriorityConfig.MessagesRatePerOracle, + uint32(o.lowPriorityConfig.MessagesCapacityPerOracle), + }, + ragetypes.TokenBucketParams{ + o.lowPriorityConfig.BytesRatePerOracle, + uint32(o.lowPriorityConfig.BytesCapacityPerOracle), + }, + }, ) if err != nil { return fmt.Errorf("failed to create (low priority) stream for oracle %v (peer id: %q): %w", oid, pid, err) } - defaultPriorityStream, err := o.host.NewStream( + defaultPriorityStream, err := o.host.NewStream2( pid, - streamNameFromConfigDigestAndPriority(o.configDigest, ocr2types.BinaryMessagePriorityDefault), - o.defaultPriorityConfig.OverrideOutgoingMessageBufferSize, - o.defaultPriorityConfig.OverrideIncomingMessageBufferSize, - noLimitsMaxMessageLength, - noLimitsTokenBucketParams, - noLimitsTokenBucketParams, + streamNameFromConfigDigestAndPriority(o.configDigest, ragep2pnew.StreamPriorityDefault), + ragep2pnew.StreamPriorityDefault, + ragep2pnew.Stream2Limits{ + o.defaultPriorityConfig.OverrideOutgoingMessageBufferSize, + o.defaultPriorityConfig.OverrideIncomingMessageBufferSize, + o.defaultPriorityConfig.MaxMessageLength, + ragetypes.TokenBucketParams{ + o.defaultPriorityConfig.MessagesRatePerOracle, + uint32(o.defaultPriorityConfig.MessagesCapacityPerOracle), + }, + ragetypes.TokenBucketParams{ 
+ o.defaultPriorityConfig.BytesRatePerOracle, + uint32(o.defaultPriorityConfig.BytesCapacityPerOracle), + }, + }, ) if err != nil { return fmt.Errorf("failed to create (default priority) stream for oracle %v (peer id: %q): %w", oid, pid, err) @@ -193,217 +202,69 @@ func (o *ocrEndpointV3) start() error { // remote goes mad and sends us thousands of messages, we don't drop any // messages from good remotes func (o *ocrEndpointV3) runRecv(oid commontypes.OracleID) { - chRecv1 := o.streams[oid].Default.ReceiveMessages() - chRecv2 := o.streams[oid].Low.ReceiveMessages() + chRecv1 := o.streams[oid].Default.Receive() + chRecv2 := o.streams[oid].Low.Receive() for { - var ( - msg []byte - priority ocr2types.BinaryMessageOutboundPriority - ) - select { - case msg = <-chRecv1: - priority = ocr2types.BinaryMessagePriorityDefault - case msg = <-chRecv2: - priority = ocr2types.BinaryMessagePriorityLow - case <-o.chClose: - return - } - - inMsg, err := o.translateInboundMessage(msg, priority, oid) - if err != nil { - o.logger.Warn("Invalid inbound message", commontypes.LogFields{ - "remoteOracleID": oid, - "priority": priority, - "reason": err, - }) - continue - } - select { - case o.recv <- ocr2types.InboundBinaryMessageWithSender{inMsg, oid}: - continue + case msg := <-chRecv1: + select { + case o.recv <- ocr2types.InboundBinaryMessageWithSender{o.translateInboundMessage(msg, ocr2types.BinaryMessagePriorityDefault), oid}: + continue + case <-o.chClose: + return + } + case msg := <-chRecv2: + select { + case o.recv <- ocr2types.InboundBinaryMessageWithSender{o.translateInboundMessage(msg, ocr2types.BinaryMessagePriorityLow), oid}: + continue + case <-o.chClose: + return + } case <-o.chClose: return } } } -type ocrEndpointV3PayloadType byte - -const ( - _ ocrEndpointV3PayloadType = iota - ocrEndpointV3PayloadTypePlain - ocrEndpointV3PayloadTypeRequest - ocrEndpointV3PayloadTypeResponse -) - -type ocrEndpointV3Payload struct { - sumType ocrEndpointV3PayloadSumType -} - -func (o 
*ocrEndpointV3Payload) MarshalBinary() ([]byte, error) { - var prefix byte - switch o.sumType.(type) { - case *ocrEndpointV3PayloadPlain: - prefix = byte(ocrEndpointV3PayloadTypePlain) - case *ocrEndpointV3PayloadRequest: - prefix = byte(ocrEndpointV3PayloadTypeRequest) - case *ocrEndpointV3PayloadResponse: - prefix = byte(ocrEndpointV3PayloadTypeResponse) - } - sumTypeBytes, err := o.sumType.MarshalBinary() - if err != nil { - return nil, err - } - return append([]byte{prefix}, sumTypeBytes...), nil -} - -func (o *ocrEndpointV3Payload) UnmarshalBinary(data []byte) error { - if len(data) < 1 { - return fmt.Errorf("data is too short to contain prefix") - } - prefix := ocrEndpointV3PayloadType(data[0]) - data = data[1:] - switch prefix { - case ocrEndpointV3PayloadTypePlain: - o.sumType = &ocrEndpointV3PayloadPlain{} - case ocrEndpointV3PayloadTypeRequest: - o.sumType = &ocrEndpointV3PayloadRequest{} - case ocrEndpointV3PayloadTypeResponse: - o.sumType = &ocrEndpointV3PayloadResponse{} - } - return o.sumType.UnmarshalBinary(data) -} - -//go-sumtype:decl ocrEndpointV3PayloadSumType - -type ocrEndpointV3PayloadSumType interface { - isOCREndpointV3PayloadSumType() - encoding.BinaryMarshaler - encoding.BinaryUnmarshaler -} - -type ocrEndpointV3PayloadPlain struct { - payload []byte -} - -func (op ocrEndpointV3PayloadPlain) isOCREndpointV3PayloadSumType() {} - -func (op *ocrEndpointV3PayloadPlain) MarshalBinary() ([]byte, error) { - return op.payload, nil -} - -func (op *ocrEndpointV3PayloadPlain) UnmarshalBinary(data []byte) error { - op.payload = data - return nil -} - -type ocrEndpointV3PayloadRequest struct { - requestID ocrendpointv3types.RequestID - payload []byte -} - -func (oreq ocrEndpointV3PayloadRequest) isOCREndpointV3PayloadSumType() {} - -func (oreq *ocrEndpointV3PayloadRequest) MarshalBinary() ([]byte, error) { - return append(oreq.requestID[:], oreq.payload...), nil -} - -func (oreq *ocrEndpointV3PayloadRequest) UnmarshalBinary(data []byte) error { - if 
len(data) < len(oreq.requestID) { - return fmt.Errorf("data is too short to contain requestID") - } - oreq.requestID = ocrendpointv3types.RequestID(data[:len(oreq.requestID)]) - oreq.payload = data[len(oreq.requestID):] - return nil -} - -type ocrEndpointV3PayloadResponse struct { - requestID ocrendpointv3types.RequestID - payload []byte -} - -func (ores ocrEndpointV3PayloadResponse) isOCREndpointV3PayloadSumType() {} - -func (ores *ocrEndpointV3PayloadResponse) MarshalBinary() ([]byte, error) { - return append(ores.requestID[:], ores.payload...), nil -} - -func (ores *ocrEndpointV3PayloadResponse) UnmarshalBinary(data []byte) error { - if len(data) < len(ores.requestID) { - return fmt.Errorf("data is too short to contain requestID") - } - ores.requestID = ocrendpointv3types.RequestID(data[:len(ores.requestID)]) - ores.payload = data[len(ores.requestID):] - return nil -} - -func (o *ocrEndpointV3) translateInboundMessage(ragepayload []byte, priority ocr2types.BinaryMessageOutboundPriority, from commontypes.OracleID) (ocr2types.InboundBinaryMessage, error) { - var payload ocrEndpointV3Payload - if err := payload.UnmarshalBinary(ragepayload); err != nil { - return nil, err - } - - switch msg := payload.sumType.(type) { - case *ocrEndpointV3PayloadPlain: - return ocr2types.InboundBinaryMessagePlain{msg.payload, priority}, nil - - case *ocrEndpointV3PayloadRequest: - rid := msg.requestID +func (o *ocrEndpointV3) translateInboundMessage(inMsg ragep2pnew.InboundBinaryMessage, priority ocr2types.BinaryMessageOutboundPriority) ocr2types.InboundBinaryMessage { + switch msg := inMsg.(type) { + case ragep2pnew.InboundBinaryMessagePlain: + return ocr2types.InboundBinaryMessagePlain{msg.Payload, priority} + case ragep2pnew.InboundBinaryMessageRequest: return ocr2types.InboundBinaryMessageRequest{ - ocrEndpointV3RequestHandle{priority, rid}, - msg.payload, + ocrEndpointV3RequestHandle{priority, msg.RequestHandle}, + msg.Payload, priority, - }, nil - - case 
*ocrEndpointV3PayloadResponse: - sid := ocrendpointv3types.StreamID{from, priority} - rid := msg.requestID - - checkResult := o.responseChecker.CheckResponse(sid, rid, len(msg.payload)) - switch checkResult { - case responselimit.ResponseCheckResultReject: - return nil, fmt.Errorf("rejected response") - case responselimit.ResponseCheckResultAllow: - return ocr2types.InboundBinaryMessageResponse{msg.payload, priority}, nil } - panic(fmt.Sprintf("unexpected responselimit.ResponseCheckResult: %#v", checkResult)) + case ragep2pnew.InboundBinaryMessageResponse: + return ocr2types.InboundBinaryMessageResponse{msg.Payload, priority} } - - panic("unknown ocrEndpointV3PayloadType") + panic("unknown type of ragep2pnew.InboundBinaryMessage") } -func (o *ocrEndpointV3) translateOutboundMessage(outMsg ocr2types.OutboundBinaryMessage, to commontypes.OracleID) ( - ragepayload []byte, +func (o *ocrEndpointV3) translateOutboundMessage(outMsg ocr2types.OutboundBinaryMessage) ( + ragemsg ragep2pnew.OutboundBinaryMessage, priority ocr2types.BinaryMessageOutboundPriority, - err error, ) { - var payload ocrEndpointV3Payload switch msg := outMsg.(type) { case ocr2types.OutboundBinaryMessagePlain: - payload.sumType = &ocrEndpointV3PayloadPlain{msg.Payload} + ragemsg = ragep2pnew.OutboundBinaryMessagePlain{msg.Payload} priority = msg.Priority case ocr2types.OutboundBinaryMessageRequest: - var ocrendpointv3responsepolicy responselimit.ResponsePolicy + var rageresponsepolicy ragep2pnew.ResponsePolicy switch responsepolicy := msg.ResponsePolicy.(type) { case ocr2types.SingleUseSizedLimitedResponsePolicy: - ocrendpointv3responsepolicy = &responselimit.SingleUseSizedLimitedResponsePolicy{ + rageresponsepolicy = &ragep2pnew.SingleUseSizedLimitedResponsePolicy{ responsepolicy.MaxSize, responsepolicy.ExpiryTimestamp, } } - + ragemsg = ragep2pnew.OutboundBinaryMessageRequest{rageresponsepolicy, msg.Payload} priority = msg.Priority - sid := ocrendpointv3types.StreamID{to, priority} - rid := 
ocrendpointv3types.GetRandomRequestID() - o.responseChecker.SetPolicy(sid, rid, ocrendpointv3responsepolicy) - - payload.sumType = &ocrEndpointV3PayloadRequest{rid, msg.Payload} - case ocr2types.OutboundBinaryMessageResponse: requestHandle, ok := ocr2types.MustGetOutboundBinaryMessageResponseRequestHandle(msg).(ocrEndpointV3RequestHandle) if !ok { @@ -413,16 +274,14 @@ func (o *ocrEndpointV3) translateOutboundMessage(outMsg ocr2types.OutboundBinary ) return } - - requestID := requestHandle.requestID - payload.sumType = &ocrEndpointV3PayloadResponse{requestID, msg.Payload} + ragemsg = requestHandle.rageRequestHandle.MakeResponse(msg.Payload) priority = msg.Priority default: - panic("unknown type of ocr2types.OutboundBinaryMessage") + panic("unknown type of commontypes.OutboundBinaryMessage") } - ragepayload, err = payload.MarshalBinary() - return ragepayload, priority, err + + return ragemsg, priority } func (o *ocrEndpointV3) runSendToSelf() { @@ -457,19 +316,11 @@ func (o *ocrEndpointV3) Close() error { var allErrors error for oid, priorityGroupStream := range o.streams { - { - sid := ocrendpointv3types.StreamID{oid, ocr2types.BinaryMessagePriorityDefault} - o.responseChecker.ClearPoliciesForStream(sid) - if err := priorityGroupStream.Default.Close(); err != nil { - allErrors = errors.Join(allErrors, fmt.Errorf("error while closing (default priority) stream with oracle %v: %w", oid, err)) - } + if err := priorityGroupStream.Default.Close(); err != nil { + allErrors = errors.Join(allErrors, fmt.Errorf("error while closing (default priority) stream with oracle %v: %w", oid, err)) } - { - sid := ocrendpointv3types.StreamID{oid, ocr2types.BinaryMessagePriorityLow} - o.responseChecker.ClearPoliciesForStream(sid) - if err := priorityGroupStream.Low.Close(); err != nil { - allErrors = errors.Join(allErrors, fmt.Errorf("error while closing (low priority) stream with oracle %v: %w", oid, err)) - } + if err := priorityGroupStream.Low.Close(); err != nil { + allErrors = 
errors.Join(allErrors, fmt.Errorf("error while closing (low priority) stream with oracle %v: %w", oid, err)) } } @@ -506,24 +357,18 @@ func (o *ocrEndpointV3) SendTo(msg ocr2types.OutboundBinaryMessage, to commontyp return } - ragemsg, priority, err := o.translateOutboundMessage(msg, to) - if err != nil { - o.logger.Error("Failed to translate outbound message", commontypes.LogFields{ - "error": err, - }) - return - } + ragemsg, priority := o.translateOutboundMessage(msg) switch priority { case ocr2types.BinaryMessagePriorityDefault: - o.streams[to].Default.SendMessage(ragemsg) + o.streams[to].Default.Send(ragemsg) case ocr2types.BinaryMessagePriorityLow: - o.streams[to].Low.SendMessage(ragemsg) + o.streams[to].Low.Send(ragemsg) } } type ocrEndpointV3RequestHandle struct { - priority ocr2types.BinaryMessageOutboundPriority - requestID ocrendpointv3types.RequestID + priority ocr2types.BinaryMessageOutboundPriority + rageRequestHandle ragep2pnew.RequestHandle } func (rh ocrEndpointV3RequestHandle) MakeResponse(payload []byte) ocr2types.OutboundBinaryMessageResponse { @@ -593,12 +438,12 @@ func (o *ocrEndpointV3) Receive() <-chan ocr2types.InboundBinaryMessageWithSende return o.recv } -func streamNameFromConfigDigestAndPriority(cd ocr2types.ConfigDigest, priority ocr2types.BinaryMessageOutboundPriority) string { +func streamNameFromConfigDigestAndPriority(cd ocr2types.ConfigDigest, priority ragep2pnew.StreamPriority) string { switch priority { - case ocr2types.BinaryMessagePriorityLow: + case ragep2pnew.StreamPriorityLow: return fmt.Sprintf("ocr/%s/priority=low", cd) - case ocr2types.BinaryMessagePriorityDefault: + case ragep2pnew.StreamPriorityDefault: return fmt.Sprintf("ocr/%s", cd) } - panic("case implementation for ragep2p.StreamPriority missing") + panic("case implementation for ragep2pnew.StreamPriority missing") } diff --git a/networking/peer_group.go b/networking/peer_group.go index a811be41..1ed05bac 100644 --- a/networking/peer_group.go +++ 
b/networking/peer_group.go @@ -8,8 +8,8 @@ import ( "sync" "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" - "github.com/smartcontractkit/libocr/ragep2p" ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" ) @@ -60,7 +60,7 @@ type Stream interface { Close() error } -var _ Stream = &ragep2p.Stream{} +var _ Stream = (ragep2pwrapper.Stream)(nil) //sumtype:decl type NewStreamArgs interface { @@ -72,8 +72,8 @@ type NewStreamArgs1 struct { OutgoingBufferSize int // number of messages that fit in the outgoing buffer IncomingBufferSize int // number of messages that fit in the incoming buffer MaxMessageLength int - MessagesLimit ragep2p.TokenBucketParams // rate limit for incoming messages - BytesLimit ragep2p.TokenBucketParams // rate limit for incoming messages + MessagesLimit ragetypes.TokenBucketParams // rate limit for incoming messages + BytesLimit ragetypes.TokenBucketParams // rate limit for incoming messages } func (NewStreamArgs1) isNewStreamArgs() {} @@ -146,7 +146,7 @@ const ( type peerGroup struct { reg *endpointRegistration - host *ragep2p.Host + host ragep2pwrapper.Host streamNamePrefix string peerIDSet map[ragetypes.PeerID]struct{} @@ -159,7 +159,7 @@ type peerGroup struct { // managedStream is a wrapper around ragep2p.Stream that removes the stream from // peerGroup upon Close. 
type managedStream struct { - stream *ragep2p.Stream + stream ragep2pwrapper.Stream onClose func() } @@ -240,7 +240,7 @@ func (f *peerGroup) Close() error { // defensive continue } - stream, ok := e.Value.(*ragep2p.Stream) + stream, ok := e.Value.(ragep2pwrapper.Stream) if !ok { // defensive continue diff --git a/networking/peer_v2.go b/networking/peer_v2.go index 5598e216..6dbf1a18 100644 --- a/networking/peer_v2.go +++ b/networking/peer_v2.go @@ -13,13 +13,17 @@ import ( "github.com/smartcontractkit/libocr/internal/metricshelper" "github.com/smartcontractkit/libocr/internal/peerkeyringhelper" "github.com/smartcontractkit/libocr/networking/ragedisco" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" "github.com/smartcontractkit/libocr/networking/rageping" nettypes "github.com/smartcontractkit/libocr/networking/types" ocr2types "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/libocr/ragep2p" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew" ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" ) +var DangerDangerEnableExperimentalRageP2P = "I promise I know what I'm doing, give me the experimental ragep2p" + // Exactly one of PrivKey (deprecated) or PeerKeyring must be provided. type PeerConfig struct { // Exactly one of PrivKey (deprecated) or PeerKeyring must be provided. @@ -54,6 +58,12 @@ type PeerConfig struct { MetricsRegisterer prometheus.Registerer LatencyMetricsServiceConfigs []*rageping.LatencyMetricsServiceConfig + + // Set this to DangerDangerEnableExperimentalRageP2P to use the experimental ragep2p stack. + // If set to any other value, the default production ragep2p stack is used. + // Note that the experimental ragep2p stack is not yet ready for production use, but should + // in principle be backwards compatible with the current ragep2p stack. 
+ EnableExperimentalRageP2P string } func (c *PeerConfig) keyring() (ragetypes.PeerKeyring, error) { @@ -70,7 +80,7 @@ func (c *PeerConfig) keyring() (ragetypes.PeerKeyring, error) { // concretePeerV2 represents a ragep2p peer with one peer ID listening on one port type concretePeerV2 struct { peerID ragetypes.PeerID - host *ragep2p.Host + host ragep2pwrapper.Host discoverer *ragedisco.Ragep2pDiscoverer metricsRegisterer prometheus.Registerer logger loghelper.LoggerWithContext @@ -93,6 +103,12 @@ func NewPeer(c PeerConfig) (*concretePeerV2, error) { "peerID": peerID.String(), }) + if c.EnableExperimentalRageP2P == DangerDangerEnableExperimentalRageP2P { + logger = logger.MakeChild(commontypes.LogFields{ + "ragep2p": "experimental", + }) + } + announceAddresses := c.V2AnnounceAddresses if len(c.V2AnnounceAddresses) == 0 { announceAddresses = c.V2ListenAddresses @@ -101,16 +117,33 @@ func NewPeer(c PeerConfig) (*concretePeerV2, error) { metricsRegistererWrapper := metricshelper.NewPrometheusRegistererWrapper(c.MetricsRegisterer, c.Logger) discoverer := ragedisco.NewRagep2pDiscoverer(c.V2DeltaReconcile, announceAddresses, c.V2DiscovererDatabase, metricsRegistererWrapper) - host, err := ragep2p.NewHost( - ragep2p.HostConfig{c.V2DeltaDial}, - keyring, - c.V2ListenAddresses, - discoverer, - c.Logger, - metricsRegistererWrapper, - ) - if err != nil { - return nil, fmt.Errorf("failed to construct ragep2p host: %w", err) + var host ragep2pwrapper.Host + if c.EnableExperimentalRageP2P == DangerDangerEnableExperimentalRageP2P { + h, err := ragep2pnew.NewHost( + ragep2pnew.HostConfig{c.V2DeltaDial}, + keyring, + c.V2ListenAddresses, + discoverer, + c.Logger, + metricsRegistererWrapper, + ) + if err != nil { + return nil, fmt.Errorf("failed to construct ragep2pnew host: %w", err) + } + host = ragep2pnew.Wrapped(h) + } else { + h, err := ragep2p.NewHost( + ragep2p.HostConfig{c.V2DeltaDial}, + keyring, + c.V2ListenAddresses, + discoverer, + c.Logger, + metricsRegistererWrapper, + 
) + if err != nil { + return nil, fmt.Errorf("failed to construct ragep2pnew host: %w", err) + } + host = ragep2p.Wrapped(h) } err = host.Start() if err != nil { diff --git a/networking/ragedisco/ragep2p_discoverer.go b/networking/ragedisco/ragep2p_discoverer.go index e3f91a1e..d868d1e8 100644 --- a/networking/ragedisco/ragep2p_discoverer.go +++ b/networking/ragedisco/ragep2p_discoverer.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" nettypes "github.com/smartcontractkit/libocr/networking/types" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/libocr/ragep2p" @@ -33,14 +34,14 @@ type Ragep2pDiscoverer struct { deltaReconcile time.Duration announceAddresses []string db nettypes.DiscovererDatabase - host *ragep2p.Host + host ragep2pwrapper.Host proto *discoveryProtocol stateMu sync.Mutex state ragep2pDiscovererState streamsMu sync.Mutex - streams map[ragetypes.PeerID]*ragep2p.Stream + streams map[ragetypes.PeerID]ragep2pwrapper.Stream chIncomingMessages chan incomingMessage chOutgoingMessages chan outgoingMessage @@ -69,7 +70,7 @@ func NewRagep2pDiscoverer( sync.Mutex{}, ragep2pDiscovererUnstarted, sync.Mutex{}, - make(map[ragetypes.PeerID]*ragep2p.Stream), + make(map[ragetypes.PeerID]ragep2pwrapper.Stream), make(chan incomingMessage), make(chan outgoingMessage), make(chan connectivityMsg), @@ -77,7 +78,7 @@ func NewRagep2pDiscoverer( } } -func (r *Ragep2pDiscoverer) Start(host *ragep2p.Host, keyring ragetypes.PeerKeyring, logger loghelper.LoggerWithContext) error { +func (r *Ragep2pDiscoverer) Start(host ragep2pwrapper.Host, keyring ragetypes.PeerKeyring, logger loghelper.LoggerWithContext) error { succeeded := false defer func() { if !succeeded { @@ -144,7 +145,7 @@ func (r *Ragep2pDiscoverer) connectivityLoop() { // no 
point in keeping very large buffers, since only // the latest messages matter anyways. bufferSize := 2 - messagesLimit := ragep2p.TokenBucketParams{ + messagesLimit := ragetypes.TokenBucketParams{ // we expect one message every deltaReconcile seconds, let's double it // for good measure 2 / r.deltaReconcile.Seconds(), @@ -152,7 +153,7 @@ func (r *Ragep2pDiscoverer) connectivityLoop() { 2 * uint32(bufferSize), } // bytesLimit is messagesLimit * maxMessageLength - bytesLimit := ragep2p.TokenBucketParams{ + bytesLimit := ragetypes.TokenBucketParams{ messagesLimit.Rate * maxMessageLength, messagesLimit.Capacity * maxMessageLength, } diff --git a/networking/ragep2pwrapper/wrapper.go b/networking/ragep2pwrapper/wrapper.go new file mode 100644 index 00000000..e5e60e4f --- /dev/null +++ b/networking/ragep2pwrapper/wrapper.go @@ -0,0 +1,32 @@ +// Temporary wrapper package to allow switching between ragep2p and ragep2pnew. +// Eventually, we will remove this package and use ragep2pnew directly. +package ragep2pwrapper + +import ( + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +type Host interface { + Start() error + Close() error + ID() types.PeerID + NewStream( + other types.PeerID, + streamName string, + outgoingBufferSize int, + incomingBufferSize int, + maxMessageLength int, + messagesLimit types.TokenBucketParams, + bytesLimit types.TokenBucketParams, + ) (Stream, error) + + RawWrappee() any +} + +type Stream interface { + Other() types.PeerID + Name() string + SendMessage(data []byte) + ReceiveMessages() <-chan []byte + Close() error +} diff --git a/networking/rageping/service.go b/networking/rageping/service.go index 27e0945a..2f42abc2 100644 --- a/networking/rageping/service.go +++ b/networking/rageping/service.go @@ -12,14 +12,14 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/loghelper" - "github.com/smartcontractkit/libocr/ragep2p" + 
"github.com/smartcontractkit/libocr/networking/ragep2pwrapper" ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" unsafeRand "math/rand" ) type latencyMetricsService struct { - host *ragep2p.Host + host ragep2pwrapper.Host metricsRegisterer prometheus.Registerer logger loghelper.LoggerWithContext peerStates map[ragetypes.PeerID]*latencyMetricsPeerState @@ -61,7 +61,7 @@ type latencyMetricsPeerState struct { metrics *latencyMetrics // Main stream used for sending/receiving PING/PONG messages. - stream *ragep2p.Stream + stream ragep2pwrapper.Stream // A reference counter for the number of times this particular peer has been registered. Only a single state is // kept per peer (and config). Registering the same peer multiple times does not create a new ping/pong protocol @@ -83,8 +83,8 @@ type latencyMetricsServiceStreamLimits struct { outgoingBufferSize int incomingBufferSize int maxMessageLength int - messagesLimit ragep2p.TokenBucketParams - bytesLimit ragep2p.TokenBucketParams + messagesLimit ragetypes.TokenBucketParams + bytesLimit ragetypes.TokenBucketParams } func (c *LatencyMetricsServiceConfig) getStreamLimits() *latencyMetricsServiceStreamLimits { @@ -106,10 +106,10 @@ func (c *LatencyMetricsServiceConfig) getStreamLimits() *latencyMetricsServiceSt // considered for the rate limits.) 
msgsCapacity := uint32(2 + 2 /* margin of error */) msgsRate := 2.0 / c.MinPeriod.Seconds() - msgsLimit := ragep2p.TokenBucketParams{msgsRate, msgsCapacity} + msgsLimit := ragetypes.TokenBucketParams{msgsRate, msgsCapacity} bytesCapacity := uint32((c.PingSize + pongSize) * 2) bytesRate := float64(bytesCapacity) / c.MinPeriod.Seconds() - bytesLimit := ragep2p.TokenBucketParams{bytesRate, bytesCapacity} + bytesLimit := ragetypes.TokenBucketParams{bytesRate, bytesCapacity} return &latencyMetricsServiceStreamLimits{ outgoingBufferSize, @@ -251,7 +251,7 @@ func (sg *latencyMetricsServiceGroup) Close() { } } -func (s *latencyMetricsService) initStream(peerID ragetypes.PeerID) (*ragep2p.Stream, error) { +func (s *latencyMetricsService) initStream(peerID ragetypes.PeerID) (ragep2pwrapper.Stream, error) { // Get a unique stream name for each configuration. streamName := fmt.Sprintf( "ping-pong-(%v|%v|%v|%v)", s.config.PingSize, s.config.MinPeriod, s.config.MaxPeriod, s.config.Timeout, @@ -422,7 +422,7 @@ func (s *latencyMetricsService) run(remotePeerID ragetypes.PeerID, peerState *la } func (s *latencyMetricsService) sendPing( - remotePeerID ragetypes.PeerID, stream *ragep2p.Stream, metrics *latencyMetrics, + remotePeerID ragetypes.PeerID, stream ragep2pwrapper.Stream, metrics *latencyMetrics, ) (lastPingSentAt time.Time, expectedPongMsg []byte) { // Generate a new random PING message to be sent to the remote peer. pingMsg, err := s.preparePingMessage() @@ -463,7 +463,7 @@ func (s *latencyMetricsService) processTimedOutPing(remotePeerID ragetypes.PeerI func (s *latencyMetricsService) processIncomingPingMessage( pingMsg []byte, remotePeerID ragetypes.PeerID, - stream *ragep2p.Stream, + stream ragep2pwrapper.Stream, metrics *latencyMetrics, ) { // Some valid PING message was received from the remote peer. 
diff --git a/networking/rageping/types.go b/networking/rageping/types.go index b6e7bb13..9b314e6e 100644 --- a/networking/rageping/types.go +++ b/networking/rageping/types.go @@ -7,7 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/loghelper" - "github.com/smartcontractkit/libocr/ragep2p" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" ragetypes "github.com/smartcontractkit/libocr/ragep2p/types" ) @@ -77,7 +77,7 @@ func DefaultLatencyBuckets() []float64 { // configuration multiple times as parameter. (This minor restriction is a result of how the underlying streams are // initialized, and may be lifted if needed.) func NewLatencyMetricsService( - host *ragep2p.Host, + host ragep2pwrapper.Host, registerer prometheus.Registerer, logger loghelper.LoggerWithContext, configs []*LatencyMetricsServiceConfig, diff --git a/offchainreporting2plus/internal/managed/limits/ocr3_1_limits.go b/offchainreporting2plus/internal/managed/limits/ocr3_1_limits.go index 72220e24..aefc3194 100644 --- a/offchainreporting2plus/internal/managed/limits/ocr3_1_limits.go +++ b/offchainreporting2plus/internal/managed/limits/ocr3_1_limits.go @@ -7,35 +7,37 @@ import ( "math/big" "time" - "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" - + "github.com/smartcontractkit/libocr/internal/jmt" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) -type ocr3_1serializedLengthLimits struct { - maxLenMsgNewEpoch int - maxLenMsgEpochStartRequest int - maxLenMsgEpochStart int - maxLenMsgRoundStart int - maxLenMsgObservation int - maxLenMsgProposal int - maxLenMsgPrepare int - maxLenMsgCommit int - 
maxLenMsgReportSignatures int - maxLenMsgCertifiedCommitRequest int - maxLenMsgCertifiedCommit int - maxLenMsgBlockSyncSummary int - maxLenMsgBlockSyncRequest int - maxLenMsgBlockSync int - maxLenMsgBlobOffer int - maxLenMsgBlobChunkRequest int - maxLenMsgBlobChunkResponse int - maxLenMsgBlobAvailable int +type OCR3_1SerializedLengthLimits struct { + MaxLenMsgNewEpoch int + MaxLenMsgEpochStartRequest int + MaxLenMsgEpochStart int + MaxLenMsgRoundStart int + MaxLenMsgObservation int + MaxLenMsgProposal int + MaxLenMsgPrepare int + MaxLenMsgCommit int + MaxLenMsgReportSignatures int + MaxLenMsgCertifiedCommitRequest int + MaxLenMsgCertifiedCommit int + MaxLenMsgStateSyncSummary int + MaxLenMsgBlockSyncRequest int + MaxLenMsgBlockSyncResponse int + MaxLenMsgTreeSyncChunkRequest int + MaxLenMsgTreeSyncChunkResponse int + MaxLenMsgBlobOffer int + MaxLenMsgBlobOfferResponse int + MaxLenMsgBlobChunkRequest int + MaxLenMsgBlobChunkResponse int } -func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.ReportingPluginLimits, maxSigLen int) (types.BinaryNetworkEndpointLimits, types.BinaryNetworkEndpointLimits, ocr3_1serializedLengthLimits, error) { +func OCR3_1Limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.ReportingPluginLimits, maxSigLen int) (types.BinaryNetworkEndpointLimits, types.BinaryNetworkEndpointLimits, OCR3_1SerializedLengthLimits, error) { overflow := false // These two helper functions add/multiply together a bunch of numbers and set overflow to true if the result @@ -62,13 +64,23 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin return int(prod.Int64()) } + const repeatedOverhead = 10 const sigOverhead = 10 const overhead = 256 - maxLenStateTransitionOutputsAndReportsPlusPrecursor := add(mul(2, pluginLimits.MaxKeyValueModifiedKeysPlusValuesLength), pluginLimits.MaxReportsPlusPrecursorLength, overhead) - maxLenCertifiedPrepareOrCommit := add(mul(ed25519.SignatureSize+sigOverhead, 
cfg.ByzQuorumSize()), len(protocol.StateTransitionInputsDigest{}), maxLenStateTransitionOutputsAndReportsPlusPrecursor, overhead) - maxLenCertifiedCommittedReports := add(mul(ed25519.SignatureSize+sigOverhead, cfg.ByzQuorumSize()), len(protocol.StateTransitionInputsDigest{}), len(protocol.StateTransitionOutputDigest{}), pluginLimits.MaxReportsPlusPrecursorLength, overhead) - + maxLenStateTransitionOutputs := add(mul(2, pluginLimits.MaxKeyValueModifiedKeysPlusValuesLength), overhead) + maxLenCertifiedPrepareOrCommit := add(mul(ed25519.SignatureSize+sigOverhead, cfg.ByzQuorumSize()), + len(protocol.StateTransitionInputsDigest{}), + maxLenStateTransitionOutputs, + len(protocol.StateRootDigest{}), + pluginLimits.MaxReportsPlusPrecursorLength, + overhead) + maxLenCertifiedCommittedReports := add(mul(ed25519.SignatureSize+sigOverhead, cfg.ByzQuorumSize()), + len(protocol.StateTransitionInputsDigest{}), + len(protocol.StateTransitionOutputDigest{}), + len(protocol.StateRootDigest{}), + pluginLimits.MaxReportsPlusPrecursorLength, + overhead) maxLenMsgNewEpoch := overhead maxLenMsgEpochStartRequest := add(maxLenCertifiedPrepareOrCommit, overhead) maxLenMsgEpochStart := add(maxLenCertifiedPrepareOrCommit, mul(ed25519.SignatureSize+sigOverhead, cfg.ByzQuorumSize()), overhead) @@ -80,12 +92,48 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin maxLenMsgReportSignatures := add(mul(add(maxSigLen, sigOverhead), pluginLimits.MaxReportCount), overhead) maxLenMsgCertifiedCommitRequest := overhead maxLenMsgCertifiedCommit := add(maxLenCertifiedCommittedReports, overhead) + maxLenMsgStateSyncSummary := overhead + + // tree sync messages + maxLenMsgTreeSyncChunkRequest := overhead + maxLenMsgTreeSyncChunkResponseBoundingLeaf := add( + len(jmt.Digest{}), // leaf key + len(jmt.Digest{}), // leaf value + mul( // siblings + jmt.MaxProofLength, + add( + repeatedOverhead, + len(jmt.Digest{}), + ), + ), + ) + maxLenMsgTreeSyncChunkResponseKeyValues := add( + 
protocol.MaxTreeSyncChunkKeysPlusValuesLength, + mul( // repeated overheads + protocol.MaxTreeSyncChunkKeys, + repeatedOverhead, // key-value + add( + repeatedOverhead, // key + repeatedOverhead, // value + ), + ), + ) + maxLenMsgTreeSyncChunkResponse := add( + overhead, + mul( + jmt.MaxBoundingLeaves, + add( + repeatedOverhead, + maxLenMsgTreeSyncChunkResponseBoundingLeaf, + ), + ), + maxLenMsgTreeSyncChunkResponseKeyValues, + ) // block sync messages - maxLenMsgBlockSyncSummary := overhead maxLenMsgBlockSyncRequest := overhead maxLenAttestedStateTransitionBlock := maxLenCertifiedPrepareOrCommit - maxLenMsgBlockSync := add(mul(protocol.MaxBlocksSent, maxLenAttestedStateTransitionBlock), overhead) + maxLenMsgBlockSyncResponse := add(mul(protocol.MaxBlocksPerBlockSyncResponse, maxLenAttestedStateTransitionBlock), overhead) // blob exchange messages const blobChunkDigestSize = len(protocol.BlobChunkDigest{}) @@ -93,7 +141,7 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin maxLenMsgBlobOffer := add(mul(blobChunkDigestSize, maxNumBlobChunks), overhead) maxLenMsgBlobChunkRequest := add(blobChunkDigestSize, overhead) maxLenMsgBlobChunkResponse := add(blobChunkDigestSize, protocol.BlobChunkSize, overhead) - maxLenMsgBlobAvailable := add(blobChunkDigestSize, ed25519.SignatureSize+sigOverhead, overhead) + maxLenMsgBlobOfferResponse := add(blobChunkDigestSize, ed25519.SignatureSize+sigOverhead, overhead) maxDefaultPriorityMessageSize := max( maxLenMsgNewEpoch, @@ -107,50 +155,53 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin maxLenMsgReportSignatures, maxLenMsgCertifiedCommitRequest, maxLenMsgCertifiedCommit, - maxLenMsgBlockSyncSummary, maxLenMsgBlobOffer, maxLenMsgBlobChunkRequest, - - maxLenMsgBlobAvailable, ) maxLowPriorityMessageSize := max( - maxLenMsgBlockSyncSummary, + maxLenMsgStateSyncSummary, maxLenMsgBlockSyncRequest, + maxLenMsgTreeSyncChunkRequest, ) - minEpochInterval := 
math.Min(float64(cfg.DeltaProgress), math.Min(float64(cfg.DeltaInitial), float64(cfg.RMax)*float64(cfg.DeltaRound))) + minRoundInterval := math.Max(float64(cfg.DeltaRound), float64(cfg.DeltaGrace)) + + minEpochInterval := math.Min(float64(cfg.DeltaProgress), math.Min(float64(cfg.DeltaInitial), float64(cfg.RMax)*float64(minRoundInterval))) defaultPriorityMessagesRate := (1.0*float64(time.Second)/float64(cfg.DeltaResend) + 3.0*float64(time.Second)/minEpochInterval + - 8.0*float64(time.Second)/float64(cfg.DeltaRound)) * 1.2 + 8.0*float64(time.Second)/float64(minRoundInterval) + + 2.0*float64(time.Second)/float64(protocol.DeltaBlobOfferBroadcast) + + 1.0*float64(time.Second)/float64(protocol.DeltaBlobChunkRequest)) * 1.2 lowPriorityMessagesRate := (1.0*float64(time.Second)/float64(protocol.DeltaMinBlockSyncRequest) + - 1.0*float64(time.Second)/float64(protocol.DeltaBlockSyncHeartbeat)) * 1.2 + 1.0*float64(time.Second)/float64(protocol.DeltaMinTreeSyncRequest) + + 1.0*float64(time.Second)/float64(protocol.DeltaStateSyncHeartbeat)) * 1.2 defaultPriorityMessagesCapacity := mul(15, 3) - lowPriorityMessagesCapacity := mul(2, 3) + lowPriorityMessagesCapacity := mul(3, 3) // we don't multiply bytesRate by a safetyMargin since we already have a generous overhead on each message defaultPriorityBytesRate := float64(time.Second)/float64(cfg.DeltaResend)*float64(maxLenMsgNewEpoch) + float64(time.Second)/float64(minEpochInterval)*float64(maxLenMsgNewEpoch) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgPrepare) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgCommit) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgReportSignatures) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgPrepare) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgCommit) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgReportSignatures) + 
float64(time.Second)/float64(minEpochInterval)*float64(maxLenMsgEpochStart) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgRoundStart) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgProposal) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgRoundStart) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgProposal) + float64(time.Second)/float64(minEpochInterval)*float64(maxLenMsgEpochStartRequest) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgObservation) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgCertifiedCommitRequest) + - float64(time.Second)/float64(cfg.DeltaRound)*float64(maxLenMsgCertifiedCommit) + - float64(time.Second)/float64(protocol.DeltaBlobCertRequest)*float64(maxLenMsgBlobOffer) + // blob-related messages - float64(time.Second)/float64(protocol.DeltaBlobChunkRequest)*float64(maxLenMsgBlobChunkRequest) + - float64(time.Second)/float64(protocol.DeltaBlobCertRequest)*float64(maxLenMsgBlobAvailable) + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgObservation) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgCertifiedCommitRequest) + + float64(time.Second)/float64(minRoundInterval)*float64(maxLenMsgCertifiedCommit) + + float64(time.Second)/float64(protocol.DeltaBlobOfferBroadcast)*float64(maxLenMsgBlobOffer) + // blob-related messages + float64(time.Second)/float64(protocol.DeltaBlobChunkRequest)*float64(maxLenMsgBlobChunkRequest) - lowPriorityBytesRate := float64(time.Second)/float64(protocol.DeltaBlockSyncHeartbeat)*float64(maxLenMsgBlockSyncSummary) + - float64(time.Second)/float64(protocol.DeltaMinBlockSyncRequest)*float64(maxLenMsgBlockSyncRequest) + lowPriorityBytesRate := float64(time.Second)/float64(protocol.DeltaStateSyncHeartbeat)*float64(maxLenMsgStateSyncSummary) + + float64(time.Second)/float64(protocol.DeltaMinBlockSyncRequest)*float64(maxLenMsgBlockSyncRequest) + + 
float64(time.Second)/float64(protocol.DeltaMinTreeSyncRequest)*float64(maxLenMsgTreeSyncChunkRequest) defaultPriorityBytesCapacity := mul(add( maxLenMsgNewEpoch, @@ -167,18 +218,18 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin maxLenMsgCertifiedCommit, maxLenMsgBlobOffer, maxLenMsgBlobChunkRequest, - - maxLenMsgBlobAvailable, + maxLenMsgBlobOfferResponse, ), 3) lowPriorityBytesCapacity := mul(add( - maxLenMsgBlockSyncSummary, + maxLenMsgStateSyncSummary, maxLenMsgBlockSyncRequest, + maxLenMsgTreeSyncChunkRequest, ), 3) if overflow { // this should not happen due to us checking the limits in types.go - return types.BinaryNetworkEndpointLimits{}, types.BinaryNetworkEndpointLimits{}, ocr3_1serializedLengthLimits{}, fmt.Errorf("int32 overflow while computing bandwidth limits") + return types.BinaryNetworkEndpointLimits{}, types.BinaryNetworkEndpointLimits{}, OCR3_1SerializedLengthLimits{}, fmt.Errorf("int32 overflow while computing bandwidth limits") } return types.BinaryNetworkEndpointLimits{ @@ -195,7 +246,7 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin lowPriorityBytesRate, lowPriorityBytesCapacity, }, - ocr3_1serializedLengthLimits{ + OCR3_1SerializedLengthLimits{ maxLenMsgNewEpoch, maxLenMsgEpochStartRequest, maxLenMsgEpochStart, @@ -207,26 +258,15 @@ func ocr3_1limits(cfg ocr3config.PublicConfig, pluginLimits ocr3_1types.Reportin maxLenMsgReportSignatures, maxLenMsgCertifiedCommitRequest, maxLenMsgCertifiedCommit, - maxLenMsgBlockSyncSummary, + maxLenMsgStateSyncSummary, maxLenMsgBlockSyncRequest, - maxLenMsgBlockSync, + maxLenMsgBlockSyncResponse, + maxLenMsgTreeSyncChunkRequest, + maxLenMsgTreeSyncChunkResponse, maxLenMsgBlobOffer, + maxLenMsgBlobOfferResponse, maxLenMsgBlobChunkRequest, maxLenMsgBlobChunkResponse, - maxLenMsgBlobAvailable, }, nil } - -func OCR3_1Limits( - cfg ocr3config.PublicConfig, - pluginLimits ocr3_1types.ReportingPluginLimits, - maxSigLen int, -) ( - 
defaultLimits types.BinaryNetworkEndpointLimits, - lowPriorityLimits types.BinaryNetworkEndpointLimits, - err error, -) { - defaultLimits, lowPriorityLimits, _, err = ocr3_1limits(cfg, pluginLimits, maxSigLen) - return defaultLimits, lowPriorityLimits, err -} diff --git a/offchainreporting2plus/internal/managed/managed_ocr3_1_oracle.go b/offchainreporting2plus/internal/managed/managed_ocr3_1_oracle.go index 2944e2a1..00bf9286 100644 --- a/offchainreporting2plus/internal/managed/managed_ocr3_1_oracle.go +++ b/offchainreporting2plus/internal/managed/managed_ocr3_1_oracle.go @@ -159,10 +159,8 @@ func RunManagedOCR3_1Oracle[RI any]( return fmt.Errorf("ManagedOCR3_1Oracle: invalid MercuryPluginInfo"), false } - blobEndpointWrapper.SetLimits(reportingPluginInfo.Limits) - maxSigLen := onchainKeyring.MaxSignatureLength() - defaultLims, lowPriorityLimits, err := limits.OCR3_1Limits(sharedConfig.PublicConfig, reportingPluginInfo.Limits, maxSigLen) + defaultLims, lowPriorityLimits, serializedLengthLimits, err := limits.OCR3_1Limits(sharedConfig.PublicConfig, reportingPluginInfo.Limits, maxSigLen) if err != nil { logger.Error("ManagedOCR3_1Oracle: error during limits", commontypes.LogFields{ "error": err, @@ -213,6 +211,7 @@ func RunManagedOCR3_1Oracle[RI any]( registerer, reportingPluginInfo.Limits, sharedConfig.PublicConfig, + serializedLengthLimits, ) err = netEndpoint.Start() if err != nil { @@ -233,6 +232,7 @@ func RunManagedOCR3_1Oracle[RI any]( logger, "ManagedOCR3_1Oracle: error during keyValueDatabase.Close()", ) + semanticOCR3_1KeyValueDatabase := shim.NewSemanticOCR3_1KeyValueDatabase(keyValueDatabase, reportingPluginInfo.Limits, logger, metricsRegisterer) protocol.RunOracle[RI]( ctx, @@ -241,7 +241,8 @@ func RunManagedOCR3_1Oracle[RI any]( contractTransmitter, &shim.SerializingOCR3_1Database{database}, oid, - &shim.SemanticOCR3_1KeyValueStore{keyValueDatabase, reportingPluginInfo.Limits}, + semanticOCR3_1KeyValueDatabase, + reportingPluginInfo.Limits, localConfig, 
childLogger, registerer, diff --git a/offchainreporting2plus/internal/ocr3/protocol/outcome_generation_follower.go b/offchainreporting2plus/internal/ocr3/protocol/outcome_generation_follower.go index 62e77177..95660283 100644 --- a/offchainreporting2plus/internal/ocr3/protocol/outcome_generation_follower.go +++ b/offchainreporting2plus/internal/ocr3/protocol/outcome_generation_follower.go @@ -405,7 +405,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalOutcome( { seen := map[commontypes.OracleID]bool{} for _, aso := range asos { - if !(0 <= int(aso.Observer) && int(aso.Observer) <= outgen.config.N()) { + if !(0 <= int(aso.Observer) && int(aso.Observer) < outgen.config.N()) { logger.Warn("dropping MessageProposal that contains signed observation with invalid observer", commontypes.LogFields{ "seqNr": outctx.SeqNr, "invalidObserver": aso.Observer, @@ -881,5 +881,6 @@ func (outgen *outcomeGenerationState[RI]) persistAndUpdateCertIfGreater(cert Cer outgen.followerState.cert = cert } + return true } diff --git a/offchainreporting2plus/internal/ocr3/protocol/signed_data.go b/offchainreporting2plus/internal/ocr3/protocol/signed_data.go index 78334be5..91c4f779 100644 --- a/offchainreporting2plus/internal/ocr3/protocol/signed_data.go +++ b/offchainreporting2plus/internal/ocr3/protocol/signed_data.go @@ -14,9 +14,9 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) -// Returns a byte slice whose first four bytes are the string "ocr3" and the rest -// of which is the sum returned by h. Used for domain separation vs ocr2, where -// we just directly sign sha256 hashes. +// Returns a byte slice that starts with the string "ocr3" and the rest +// of which is the sum returned by h. Used for domain separation as per the comment +// on offchainreporting2plus/types.OffchainKeyring. // // Any signatures made with the OffchainKeyring should use ocr3DomainSeparatedSum! 
func ocr3DomainSeparatedSum(h hash.Hash) []byte { diff --git a/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization.go b/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization.go new file mode 100644 index 00000000..7c239e5f --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization.go @@ -0,0 +1,86 @@ +package blobtypes + +import ( + "crypto/ed25519" + "encoding" + "fmt" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization" + "google.golang.org/protobuf/proto" +) + +var _ encoding.BinaryMarshaler = LightCertifiedBlob{} +var _ encoding.BinaryAppender = LightCertifiedBlob{} +var _ encoding.BinaryUnmarshaler = &LightCertifiedBlob{} + +func (lc LightCertifiedBlob) AppendBinary(b []byte) ([]byte, error) { + pbChunkDigests := make([][]byte, 0, len(lc.ChunkDigests)) + for _, digest := range lc.ChunkDigests { + pbChunkDigests = append(pbChunkDigests, digest[:]) + } + + pbSignatures := make([]*serialization.AttributedBlobAvailabilitySignature, 0, len(lc.AttributedBlobAvailabilitySignatures)) + for _, sig := range lc.AttributedBlobAvailabilitySignatures { + pbSignatures = append(pbSignatures, serialization.NewAttributedBlobAvailabilitySignature( + sig.Signature, + uint32(sig.Signer), + )) + } + + pbLightCertifiedBlob := serialization.NewLightCertifiedBlob( + pbChunkDigests, + lc.PayloadLength, + lc.ExpirySeqNr, + uint32(lc.Submitter), + pbSignatures, + ) + + opts := proto.MarshalOptions{} + ret, err := opts.MarshalAppend(b, pbLightCertifiedBlob) + if err != nil { + return nil, fmt.Errorf("failed to MarshalAppend LightCertifiedBlob protobuf: %w", err) + } + return ret, nil +} + +func (lc LightCertifiedBlob) MarshalBinary() ([]byte, error) { + return lc.AppendBinary(nil) +} + +func (lc *LightCertifiedBlob) UnmarshalBinary(data []byte) error { + pbLightCertifiedBlob := serialization.LightCertifiedBlob{} + if err := proto.Unmarshal(data, 
&pbLightCertifiedBlob); err != nil { + return fmt.Errorf("failed to unmarshal LightCertifiedBlob protobuf: %w", err) + } + + chunkDigests := make([]BlobChunkDigest, 0, len(pbLightCertifiedBlob.ChunkDigests)) + for _, digest := range pbLightCertifiedBlob.ChunkDigests { + if len(digest) != len(BlobChunkDigest{}) { + return fmt.Errorf("invalid chunk digest length: expected %d bytes, got %d", len(BlobChunkDigest{}), len(digest)) + } + var chunkDigest BlobChunkDigest + copy(chunkDigest[:], digest) + chunkDigests = append(chunkDigests, chunkDigest) + } + + signatures := make([]AttributedBlobAvailabilitySignature, 0, len(pbLightCertifiedBlob.AttributedBlobAvailabilitySignatures)) + for _, sig := range pbLightCertifiedBlob.AttributedBlobAvailabilitySignatures { + if len(sig.Signature) != ed25519.SignatureSize { + return fmt.Errorf("invalid signature length: expected %d bytes, got %d", ed25519.SignatureSize, len(sig.Signature)) + } + signatures = append(signatures, AttributedBlobAvailabilitySignature{ + sig.Signature, + commontypes.OracleID(sig.Signer), + }) + } + + *lc = LightCertifiedBlob{ + chunkDigests, + pbLightCertifiedBlob.PayloadLength, + pbLightCertifiedBlob.ExpirySeqNr, + commontypes.OracleID(pbLightCertifiedBlob.Submitter), + signatures, + } + return nil +} diff --git a/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/offchainreporting3_1_blobs.pb.go b/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/offchainreporting3_1_blobs.pb.go new file mode 100644 index 00000000..18df5174 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/offchainreporting3_1_blobs.pb.go @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.31.0 +// protoc v4.25.1 +// source: offchainreporting3_1_blobs.proto + +package serialization + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type LightCertifiedBlob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChunkDigests [][]byte `protobuf:"bytes,1,rep,name=chunk_digests,json=chunkDigests,proto3" json:"chunk_digests,omitempty"` + PayloadLength uint64 `protobuf:"varint,2,opt,name=payload_length,json=payloadLength,proto3" json:"payload_length,omitempty"` + ExpirySeqNr uint64 `protobuf:"varint,3,opt,name=expiry_seq_nr,json=expirySeqNr,proto3" json:"expiry_seq_nr,omitempty"` + Submitter uint32 `protobuf:"varint,4,opt,name=submitter,proto3" json:"submitter,omitempty"` + AttributedBlobAvailabilitySignatures []*AttributedBlobAvailabilitySignature `protobuf:"bytes,5,rep,name=attributed_blob_availability_signatures,json=attributedBlobAvailabilitySignatures,proto3" json:"attributed_blob_availability_signatures,omitempty"` +} + +func (x *LightCertifiedBlob) Reset() { + *x = LightCertifiedBlob{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_blobs_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LightCertifiedBlob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LightCertifiedBlob) ProtoMessage() {} + +func (x *LightCertifiedBlob) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_blobs_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LightCertifiedBlob.ProtoReflect.Descriptor instead. +func (*LightCertifiedBlob) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_blobs_proto_rawDescGZIP(), []int{0} +} + +func (x *LightCertifiedBlob) GetChunkDigests() [][]byte { + if x != nil { + return x.ChunkDigests + } + return nil +} + +func (x *LightCertifiedBlob) GetPayloadLength() uint64 { + if x != nil { + return x.PayloadLength + } + return 0 +} + +func (x *LightCertifiedBlob) GetExpirySeqNr() uint64 { + if x != nil { + return x.ExpirySeqNr + } + return 0 +} + +func (x *LightCertifiedBlob) GetSubmitter() uint32 { + if x != nil { + return x.Submitter + } + return 0 +} + +func (x *LightCertifiedBlob) GetAttributedBlobAvailabilitySignatures() []*AttributedBlobAvailabilitySignature { + if x != nil { + return x.AttributedBlobAvailabilitySignatures + } + return nil +} + +type AttributedBlobAvailabilitySignature struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` + Signer uint32 `protobuf:"varint,2,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (x *AttributedBlobAvailabilitySignature) Reset() { + *x = AttributedBlobAvailabilitySignature{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_blobs_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttributedBlobAvailabilitySignature) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributedBlobAvailabilitySignature) ProtoMessage() {} + +func (x *AttributedBlobAvailabilitySignature) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_blobs_proto_msgTypes[1] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributedBlobAvailabilitySignature.ProtoReflect.Descriptor instead. +func (*AttributedBlobAvailabilitySignature) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_blobs_proto_rawDescGZIP(), []int{1} +} + +func (x *AttributedBlobAvailabilitySignature) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +func (x *AttributedBlobAvailabilitySignature) GetSigner() uint32 { + if x != nil { + return x.Signer + } + return 0 +} + +var File_offchainreporting3_1_blobs_proto protoreflect.FileDescriptor + +var file_offchainreporting3_1_blobs_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x14, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x22, 0xb5, 0x02, 0x0a, 0x12, 0x4c, 0x69, 0x67, + 0x68, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x12, + 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x44, 0x69, 0x67, + 0x65, 0x73, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x0d, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x53, 0x65, 0x71, 0x4e, 
0x72, 0x12, + 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x90, 0x01, + 0x0a, 0x27, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x6c, 0x6f, + 0x62, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x73, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x39, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x24, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x22, 0x5b, 0x0a, 0x23, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x42, 0x6c, + 0x6f, 0x62, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x42, 0x11, 0x5a, + 0x0f, 0x2e, 0x3b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_offchainreporting3_1_blobs_proto_rawDescOnce sync.Once + file_offchainreporting3_1_blobs_proto_rawDescData = file_offchainreporting3_1_blobs_proto_rawDesc 
+) + +func file_offchainreporting3_1_blobs_proto_rawDescGZIP() []byte { + file_offchainreporting3_1_blobs_proto_rawDescOnce.Do(func() { + file_offchainreporting3_1_blobs_proto_rawDescData = protoimpl.X.CompressGZIP(file_offchainreporting3_1_blobs_proto_rawDescData) + }) + return file_offchainreporting3_1_blobs_proto_rawDescData +} + +var file_offchainreporting3_1_blobs_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_offchainreporting3_1_blobs_proto_goTypes = []interface{}{ + (*LightCertifiedBlob)(nil), // 0: offchainreporting3_1.LightCertifiedBlob + (*AttributedBlobAvailabilitySignature)(nil), // 1: offchainreporting3_1.AttributedBlobAvailabilitySignature +} +var file_offchainreporting3_1_blobs_proto_depIdxs = []int32{ + 1, // 0: offchainreporting3_1.LightCertifiedBlob.attributed_blob_availability_signatures:type_name -> offchainreporting3_1.AttributedBlobAvailabilitySignature + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_offchainreporting3_1_blobs_proto_init() } +func file_offchainreporting3_1_blobs_proto_init() { + if File_offchainreporting3_1_blobs_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_offchainreporting3_1_blobs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LightCertifiedBlob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_blobs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributedBlobAvailabilitySignature); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_offchainreporting3_1_blobs_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_offchainreporting3_1_blobs_proto_goTypes, + DependencyIndexes: file_offchainreporting3_1_blobs_proto_depIdxs, + MessageInfos: file_offchainreporting3_1_blobs_proto_msgTypes, + }.Build() + File_offchainreporting3_1_blobs_proto = out.File + file_offchainreporting3_1_blobs_proto_rawDesc = nil + file_offchainreporting3_1_blobs_proto_goTypes = nil + file_offchainreporting3_1_blobs_proto_depIdxs = nil +} diff --git a/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/serialization.go b/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/serialization.go new file mode 100644 index 00000000..54ca5753 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/blobtypes/serialization/serialization.go @@ -0,0 +1,41 @@ +package serialization + +import ( + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +func NewAttributedBlobAvailabilitySignature( + signature []byte, + signer uint32, +) *AttributedBlobAvailabilitySignature { + return &AttributedBlobAvailabilitySignature{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + signature, + signer, + } +} + +func NewLightCertifiedBlob( + chunkDigests [][]byte, + payloadLength uint64, + expirySeqNr uint64, + submitter uint32, + attributedBlobAvailabilitySignatures []*AttributedBlobAvailabilitySignature, +) *LightCertifiedBlob { + return &LightCertifiedBlob{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + chunkDigests, + payloadLength, + expirySeqNr, + submitter, + attributedBlobAvailabilitySignatures, + } +} diff --git a/offchainreporting2plus/internal/ocr3_1/blobtypes/types.go b/offchainreporting2plus/internal/ocr3_1/blobtypes/types.go index 
50ad09ff..f74a9963 100644 --- a/offchainreporting2plus/internal/ocr3_1/blobtypes/types.go +++ b/offchainreporting2plus/internal/ocr3_1/blobtypes/types.go @@ -5,7 +5,6 @@ import ( "crypto/sha256" "encoding" "encoding/binary" - "encoding/json" "fmt" "hash" @@ -27,6 +26,12 @@ func ocr3_1DomainSeparatedSum(h hash.Hash) []byte { type BlobChunkDigest [32]byte +var _ fmt.Stringer = BlobChunkDigest{} + +func (bcd BlobChunkDigest) String() string { + return fmt.Sprintf("%x", bcd[:]) +} + func MakeBlobChunkDigest(chunk []byte) BlobChunkDigest { h := sha256.New() h.Write(chunk) @@ -71,7 +76,7 @@ func MakeBlobDigest( return result } -const blobAvailabilitySignatureDomainSeparator = "ocr3.1 BlobAvailabilitySignature" +const blobAvailabilitySignatureDomainSeparator = "ocr3.1/BlobAvailabilitySignature/" type BlobAvailabilitySignature []byte @@ -129,10 +134,11 @@ type LightCertifiedBlob struct { func (lc *LightCertifiedBlob) Verify( configDigest types.ConfigDigest, oracleIdentities []config.OracleIdentity, + fPlusOneSize int, byzQuorumSize int, ) error { - if byzQuorumSize != len(lc.AttributedBlobAvailabilitySignatures) { - return fmt.Errorf("wrong number of signatures, expected %d for byz. 
quorum but got %d", byzQuorumSize, len(lc.AttributedBlobAvailabilitySignatures)) + if !(fPlusOneSize <= len(lc.AttributedBlobAvailabilitySignatures) && len(lc.AttributedBlobAvailabilitySignatures) <= byzQuorumSize) { + return fmt.Errorf("wrong number of signatures, expected in range [%d, %d] for quorum but got %d", fPlusOneSize, byzQuorumSize, len(lc.AttributedBlobAvailabilitySignatures)) } blobDigest := MakeBlobDigest( @@ -161,28 +167,9 @@ func (lc *LightCertifiedBlob) Verify( } var _ BlobHandleSumType = &LightCertifiedBlob{} -var _ encoding.BinaryMarshaler = LightCertifiedBlob{} -var _ encoding.BinaryAppender = LightCertifiedBlob{} -var _ encoding.BinaryUnmarshaler = &LightCertifiedBlob{} func (lc *LightCertifiedBlob) isBlobHandleSumType() {} -func (lc LightCertifiedBlob) AppendBinary(b []byte) ([]byte, error) { - enc, err := json.Marshal(lc) - if err != nil { - return nil, fmt.Errorf("failed to marshal LightCertifiedBlob: %w", err) - } - return append(b, enc...), nil -} - -func (lc LightCertifiedBlob) MarshalBinary() ([]byte, error) { - return lc.AppendBinary(nil) -} - -func (lc *LightCertifiedBlob) UnmarshalBinary(data []byte) error { - return json.Unmarshal(data, &lc) -} - // go-sumtype:decl BlobHandleSumType type BlobHandleSumType interface { diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/blob_endpoint.go b/offchainreporting2plus/internal/ocr3_1/protocol/blob_endpoint.go index fa0ae660..6770e4dc 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/blob_endpoint.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/blob_endpoint.go @@ -12,27 +12,21 @@ import ( type BlobEndpointWrapper struct { mu sync.Mutex wrapped *BlobEndpoint - limits ocr3_1types.ReportingPluginLimits } -func (bew *BlobEndpointWrapper) locked() (*BlobEndpoint, ocr3_1types.ReportingPluginLimits) { +func (bew *BlobEndpointWrapper) locked() *BlobEndpoint { bew.mu.Lock() wrapped := bew.wrapped - limits := bew.limits bew.mu.Unlock() - return wrapped, limits + return 
wrapped } var _ ocr3_1types.BlobBroadcaster = &BlobEndpointWrapper{} func (bew *BlobEndpointWrapper) BroadcastBlob(ctx context.Context, payload []byte, expirationHint ocr3_1types.BlobExpirationHint) (ocr3_1types.BlobHandle, error) { - wrapped, limits := bew.locked() + wrapped := bew.locked() if wrapped == nil { - return ocr3_1types.BlobHandle{}, errBlobEndpointClosed - } - if len(payload) > limits.MaxBlobPayloadLength { - return ocr3_1types.BlobHandle{}, fmt.Errorf("blob payload length %d exceeds maximum allowed length %d", - len(payload), limits.MaxBlobPayloadLength) + return ocr3_1types.BlobHandle{}, errBlobEndpointUnavailable } return wrapped.BroadcastBlob(ctx, payload, expirationHint) } @@ -40,9 +34,9 @@ func (bew *BlobEndpointWrapper) BroadcastBlob(ctx context.Context, payload []byt var _ ocr3_1types.BlobFetcher = &BlobEndpointWrapper{} func (bew *BlobEndpointWrapper) FetchBlob(ctx context.Context, handle ocr3_1types.BlobHandle) ([]byte, error) { - wrapped, _ := bew.locked() + wrapped := bew.locked() if wrapped == nil { - return nil, errBlobEndpointClosed + return nil, errBlobEndpointUnavailable } return wrapped.FetchBlob(ctx, handle) } @@ -53,26 +47,14 @@ func (bew *BlobEndpointWrapper) setBlobEndpoint(wrapped *BlobEndpoint) { bew.mu.Unlock() } -func (bew *BlobEndpointWrapper) SetLimits(limits ocr3_1types.ReportingPluginLimits) { - bew.mu.Lock() - bew.limits = limits - bew.mu.Unlock() -} - type BlobEndpoint struct { ctx context.Context - chBlobBroadcastRequest chan<- blobBroadcastRequest - chBlobBroadcastResponse <-chan blobBroadcastResponse - - chBlobFetchRequest chan<- blobFetchRequest - chBlobFetchResponse <-chan blobFetchResponse + chBlobBroadcastRequest chan<- blobBroadcastRequest + chBlobFetchRequest chan<- blobFetchRequest } -var ( - errBlobEndpointClosed = fmt.Errorf("blob endpoint closed") - errReceivingChannelClosed = fmt.Errorf("receiving channel closed") -) +var errBlobEndpointUnavailable = fmt.Errorf("blob endpoint unavailable") func 
expirySeqNr(expirationHint ocr3_1types.BlobExpirationHint) uint64 { switch beh := expirationHint.(type) { @@ -83,61 +65,85 @@ func expirySeqNr(expirationHint ocr3_1types.BlobExpirationHint) uint64 { } } -func (be *BlobEndpoint) BroadcastBlob(_ context.Context, payload []byte, expirationHint ocr3_1types.BlobExpirationHint) (ocr3_1types.BlobHandle, error) { - chDone := be.ctx.Done() +func (be *BlobEndpoint) BroadcastBlob(ctx context.Context, payload []byte, expirationHint ocr3_1types.BlobExpirationHint) (ocr3_1types.BlobHandle, error) { + chRequestDone := ctx.Done() + chEndpointDone := be.ctx.Done() - select { - case be.chBlobBroadcastRequest <- blobBroadcastRequest{ + chResponse := make(chan blobBroadcastResponse) + chDone := make(chan struct{}) + defer close(chDone) + + request := blobBroadcastRequest{ payload, expirySeqNr(expirationHint), - }: - response := <-be.chBlobBroadcastResponse - if response.err != nil { - return ocr3_1types.BlobHandle{}, response.err - } + chResponse, + chDone, + } + select { + case be.chBlobBroadcastRequest <- request: select { - case cert, ok := <-response.chCert: - if !ok { - return ocr3_1types.BlobHandle{}, errReceivingChannelClosed + case response := <-chResponse: + if response.err != nil { + return ocr3_1types.BlobHandle{}, response.err } - return blobtypes.MakeBlobHandle(&cert), nil - case <-chDone: - return ocr3_1types.BlobHandle{}, errBlobEndpointClosed + return blobtypes.MakeBlobHandle(&response.cert), nil + case <-chEndpointDone: + return ocr3_1types.BlobHandle{}, be.ctx.Err() + case <-chRequestDone: + return ocr3_1types.BlobHandle{}, ctx.Err() } - case <-chDone: - return ocr3_1types.BlobHandle{}, errBlobEndpointClosed + case <-chEndpointDone: + return ocr3_1types.BlobHandle{}, be.ctx.Err() + case <-chRequestDone: + return ocr3_1types.BlobHandle{}, ctx.Err() } } var _ ocr3_1types.BlobBroadcaster = &BlobEndpoint{} -func (be *BlobEndpoint) FetchBlob(_ context.Context, handle ocr3_1types.BlobHandle) ([]byte, error) { - chDone := 
be.ctx.Done() +func (be *BlobEndpoint) FetchBlob(ctx context.Context, handle ocr3_1types.BlobHandle) ([]byte, error) { + chRequestDone := ctx.Done() + chEndpointDone := be.ctx.Done() + + chResponse := make(chan blobFetchResponse) + chDone := make(chan struct{}) + defer close(chDone) blobHandleSumType := blobtypes.ExtractBlobHandleSumType(handle) if blobHandleSumType == nil { return nil, fmt.Errorf("zero value blob handle provided") } + switch handle := blobHandleSumType.(type) { case *LightCertifiedBlob: + if handle == nil { + return nil, fmt.Errorf("zero value blob handle provided") + } + + request := blobFetchRequest{ + *handle, + chResponse, + chDone, + } + select { - case be.chBlobFetchRequest <- blobFetchRequest{*handle}: - response := <-be.chBlobFetchResponse - if response.err != nil { - return nil, response.err - } + case be.chBlobFetchRequest <- request: select { - case payload, ok := <-response.chPayload: - if !ok { - return nil, errReceivingChannelClosed + case response := <-chResponse: + if response.err != nil { + return nil, response.err } - return payload, nil - case <-chDone: - return nil, errBlobEndpointClosed + return response.payload, nil + case <-chEndpointDone: + return nil, be.ctx.Err() + case <-chRequestDone: + return nil, ctx.Err() } - case <-chDone: - return nil, errBlobEndpointClosed + case <-chEndpointDone: + return nil, be.ctx.Err() + case <-chRequestDone: + return nil, ctx.Err() } default: panic(fmt.Sprintf("unexpected blob handle type %T", handle)) @@ -145,3 +151,34 @@ func (be *BlobEndpoint) FetchBlob(_ context.Context, handle ocr3_1types.BlobHand } var _ ocr3_1types.BlobFetcher = &BlobEndpoint{} + +// RoundBlobBroadcastFetcher is a thin wrapper around a blob broadcast fetcher +// which enforces that no expired blobs as of the current round at seqNr are +// fetched. 
+type RoundBlobBroadcastFetcher struct { + seqNr uint64 + blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher +} + +func NewRoundBlobBroadcastFetcher(seqNr uint64, blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher) *RoundBlobBroadcastFetcher { + return &RoundBlobBroadcastFetcher{seqNr, blobBroadcastFetcher} +} + +var _ ocr3_1types.BlobBroadcastFetcher = &RoundBlobBroadcastFetcher{} + +func (r *RoundBlobBroadcastFetcher) BroadcastBlob(ctx context.Context, payload []byte, expirationHint ocr3_1types.BlobExpirationHint) (ocr3_1types.BlobHandle, error) { + return r.blobBroadcastFetcher.BroadcastBlob(ctx, payload, expirationHint) +} + +func (r *RoundBlobBroadcastFetcher) FetchBlob(ctx context.Context, handle ocr3_1types.BlobHandle) ([]byte, error) { + blobHandleSumType := blobtypes.ExtractBlobHandleSumType(handle) + switch cert := blobHandleSumType.(type) { + case *blobtypes.LightCertifiedBlob: + if cert != nil && cert.ExpirySeqNr < r.seqNr { + return nil, fmt.Errorf("blob expired") + } + return r.blobBroadcastFetcher.FetchBlob(ctx, handle) + default: + panic(fmt.Sprintf("unexpected blob handle type %T", handle)) + } +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/blob_exchange.go b/offchainreporting2plus/internal/ocr3_1/protocol/blob_exchange.go index cbdcd016..b0a67cc6 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/blob_exchange.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/blob_exchange.go @@ -3,14 +3,19 @@ package protocol import ( "context" "fmt" + "math/rand/v2" + "slices" "time" "github.com/prometheus/client_golang/prometheus" "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/byzquorum" "github.com/smartcontractkit/libocr/internal/loghelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/common/scheduler" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config" 
"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/blobtypes" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" "github.com/smartcontractkit/libocr/subprocesses" ) @@ -22,14 +27,12 @@ func RunBlobExchange[RI any]( chOutcomeGenerationToBlobExchange <-chan EventToBlobExchange[RI], chBlobBroadcastRequest <-chan blobBroadcastRequest, - chBlobBroadcastResponse chan<- blobBroadcastResponse, - chBlobFetchRequest <-chan blobFetchRequest, - chBlobFetchResponse chan<- blobFetchResponse, config ocr3config.SharedConfig, - kv KeyValueStore, + kv KeyValueDatabase, id commontypes.OracleID, + limits ocr3_1types.ReportingPluginLimits, localConfig types.LocalConfig, logger loghelper.LoggerWithContext, metricsRegisterer prometheus.Registerer, @@ -37,21 +40,17 @@ func RunBlobExchange[RI any]( offchainKeyring types.OffchainKeyring, telemetrySender TelemetrySender, ) { - missingChunkScheduler := scheduler.NewScheduler[EventMissingBlobChunk[RI]]() - defer missingChunkScheduler.Close() - - missingCertScheduler := scheduler.NewScheduler[EventMissingBlobCert[RI]]() - defer missingCertScheduler.Close() + broadcastGraceTimeoutScheduler := scheduler.NewScheduler[EventBlobBroadcastGraceTimeout[RI]]() + defer broadcastGraceTimeoutScheduler.Close() bex := makeBlobExchangeState[RI]( ctx, chNetToBlobExchange, chOutcomeGenerationToBlobExchange, - chBlobBroadcastRequest, chBlobBroadcastResponse, - chBlobFetchRequest, chBlobFetchResponse, + chBlobBroadcastRequest, chBlobFetchRequest, config, kv, - id, localConfig, logger, metricsRegisterer, netSender, offchainKeyring, + id, limits, localConfig, logger, metricsRegisterer, netSender, offchainKeyring, telemetrySender, - missingChunkScheduler, missingCertScheduler, + broadcastGraceTimeoutScheduler, ) bex.run() } @@ -63,24 +62,24 @@ func 
makeBlobExchangeState[RI any]( chOutcomeGenerationToBlobExchange <-chan EventToBlobExchange[RI], chBlobBroadcastRequest <-chan blobBroadcastRequest, - chBlobBroadcastResponse chan<- blobBroadcastResponse, - chBlobFetchRequest <-chan blobFetchRequest, - chBlobFetchResponse chan<- blobFetchResponse, config ocr3config.SharedConfig, - kv KeyValueStore, + kv KeyValueDatabase, id commontypes.OracleID, + limits ocr3_1types.ReportingPluginLimits, localConfig types.LocalConfig, logger loghelper.LoggerWithContext, metricsRegisterer prometheus.Registerer, netSender NetworkSender[RI], offchainKeyring types.OffchainKeyring, telemetrySender TelemetrySender, - missingChunkScheduler *scheduler.Scheduler[EventMissingBlobChunk[RI]], - missingCertScheduler *scheduler.Scheduler[EventMissingBlobCert[RI]], -) blobExchangeState[RI] { - return blobExchangeState[RI]{ + + broadcastGraceTimeoutScheduler *scheduler.Scheduler[EventBlobBroadcastGraceTimeout[RI]], +) *blobExchangeState[RI] { + tStopExpiredBlobFetches := time.After(DeltaStopExpiredBlobFetches) + + bex := &blobExchangeState[RI]{ ctx, subprocesses.Subprocesses{}, @@ -89,50 +88,280 @@ func makeBlobExchangeState[RI any]( chOutcomeGenerationToBlobExchange, chBlobBroadcastRequest, - chBlobBroadcastResponse, - chBlobFetchRequest, - chBlobFetchResponse, config, kv, id, + limits, localConfig, logger.MakeUpdated(commontypes.LogFields{"proto": "bex"}), netSender, offchainKeyring, telemetrySender, - missingChunkScheduler, - missingCertScheduler, + broadcastGraceTimeoutScheduler, + nil, // must be filled right below + + nil, // must be filled right below + tStopExpiredBlobFetches, make(map[BlobDigest]*blob), } + + offerRequesterGadget := requestergadget.NewRequesterGadget[blobOfferItem]( + config.N(), + DeltaBlobOfferBroadcast, + bex.trySendBlobOffer, + bex.getPendingBlobOffers, + bex.getBlobOfferSeeders, + ) + bex.offerRequesterGadget = offerRequesterGadget + + chunkRequesterGadget := requestergadget.NewRequesterGadget[blobChunkId]( + 
config.N(), + DeltaBlobChunkRequest, + bex.trySendBlobChunkRequest, + bex.getPendingBlobChunks, + bex.getBlobChunkSeeders, + ) + bex.chunkRequesterGadget = chunkRequesterGadget + + return bex +} + +func (bex *blobExchangeState[RI]) trySendBlobChunkRequest(id blobChunkId, seeder commontypes.OracleID) (*requestergadget.RequestInfo, bool) { + blob, ok := bex.blobs[id.blobDigest] + if !ok { + return nil, false + } + + if blob.fetch == nil { + return nil, false + } + + bex.logger.Debug("sending MessageBlobChunkRequest", commontypes.LogFields{ + "blobDigest": id.blobDigest, + "chunkIndex": id.chunkIndex, + "seeder": seeder, + }) + + chunkSize := blob.getBlobChunkSize(id.chunkIndex) + expiryTimestamp := time.Now().Add(blobChunkRequestExpiration(chunkSize)) + bex.netSender.SendTo(MessageBlobChunkRequest[RI]{ + nil, + &MessageBlobChunkRequestInfo{ + expiryTimestamp, + }, + id.blobDigest, + id.chunkIndex, + }, seeder) + + return &requestergadget.RequestInfo{ + expiryTimestamp, + }, true +} + +func (bex *blobExchangeState[RI]) getBlobDigestsOrderedByTimeWhenAdded() []BlobDigest { + type timedBlobDigest struct { + blobDigest BlobDigest + time time.Time + } + timedBlobDigests := make([]timedBlobDigest, 0, len(bex.blobs)) + for blobDigest, blob := range bex.blobs { + timedBlobDigests = append(timedBlobDigests, timedBlobDigest{blobDigest, blob.timeWhenAdded}) + } + + slices.SortFunc(timedBlobDigests, func(a, b timedBlobDigest) int { + return a.time.Compare(b.time) + }) + + blobDigests := make([]BlobDigest, 0, len(timedBlobDigests)) + for _, timedBlobDigest := range timedBlobDigests { + blobDigests = append(blobDigests, timedBlobDigest.blobDigest) + } + return blobDigests +} + +func (bex *blobExchangeState[RI]) getPendingBlobChunks() []blobChunkId { + var pending []blobChunkId + for _, blobDigest := range bex.getBlobDigestsOrderedByTimeWhenAdded() { + blob := bex.blobs[blobDigest] + fetch := blob.fetch + if fetch == nil { + continue + } + if fetch.expired { + continue + } + for 
chunkIndex := range blob.chunkDigests { + if blob.chunkHaves[chunkIndex] { + continue + } + pending = append(pending, blobChunkId{blobDigest, uint64(chunkIndex)}) + } + } + return pending +} + +func (bex *blobExchangeState[RI]) getBlobChunkSeeders(id blobChunkId) map[commontypes.OracleID]struct{} { + blob, ok := bex.blobs[id.blobDigest] + if !ok { + return nil + } + if blob.fetch == nil { + return nil + } + return blob.fetch.seeders +} + +func (bex *blobExchangeState[RI]) trySendBlobOffer(item blobOfferItem, seeder commontypes.OracleID) (*requestergadget.RequestInfo, bool) { + blob, ok := bex.blobs[item.blobDigest] + if !ok { + return nil, false + } + + if blob.broadcast == nil { + return nil, false + } + if !blob.broadcast.shouldOfferTo(seeder) { + return nil, false + } + + bex.logger.Trace("sending MessageBlobOffer", commontypes.LogFields{ + "blobDigest": item.blobDigest, + "chunkDigests": blob.chunkDigests, + "payloadLength": blob.payloadLength, + "expirySeqNr": blob.expirySeqNr, + "to": seeder, + }) + + expiryTimestamp := time.Now().Add(blobOfferBroadcastExpiration(blob.payloadLength)) + + bex.netSender.SendTo(MessageBlobOffer[RI]{ + nil, + &MessageBlobOfferRequestInfo{ + expiryTimestamp, + }, + blob.chunkDigests, + blob.payloadLength, + blob.expirySeqNr, + }, seeder) + + return &requestergadget.RequestInfo{ + expiryTimestamp, + }, true +} + +func (bex *blobExchangeState[RI]) getPendingBlobOffers() []blobOfferItem { + var pending []blobOfferItem + for _, blobDigest := range bex.getBlobDigestsOrderedByTimeWhenAdded() { + blob := bex.blobs[blobDigest] + if blob.broadcast == nil { + continue + } + if !blob.broadcast.shouldOffer() { + continue + } + for oracleID := range blob.broadcast.oracles { + if !blob.broadcast.shouldOfferTo(commontypes.OracleID(oracleID)) { + continue + } + pending = append(pending, blobOfferItem{blobDigest, commontypes.OracleID(oracleID)}) + } + } + return pending +} + +func (bex *blobExchangeState[RI]) getBlobOfferSeeders(item 
blobOfferItem) map[commontypes.OracleID]struct{} { + return map[commontypes.OracleID]struct{}{ + item.oracleID: {}, + } } const ( - DeltaBlobChunkRequest = 1 * time.Second - DeltaBlobChunkRequestTimeout = 5 * time.Second - DeltaBlobCertRequest = 10 * time.Second + rateBytesPerSecond = 10 * 1024 * 1024 // 10 MiB/s + latencyOverhead = 1 * time.Second + + // DeltaBlobChunkRequest denotes the minimum duration between sending two + // MessageBlobChunkRequest messages to a particular oracle. + DeltaBlobChunkRequest = 10 * time.Millisecond + + // DeltaBlobOfferBroadcast denotes the minimum duration between sending two + // MessageBlobOffer messages to a particular oracle. + DeltaBlobOfferBroadcast = 10 * time.Millisecond + + // DeltaBlobBroadcastGrace denotes the duration that we will wait after + // receiving minSigners valid accepting MessageBlobOfferResponse messages, + // to give a last chance to straggling oracles to send us a + // MessageBlobOfferResponse. + DeltaBlobBroadcastGrace = 100 * time.Millisecond + + // DeltaStopExpiredBlobFetches denotes the interval with which we check for + // in-progress blob fetches for blobs that might have expired, and mark them + // as expired and/or send reject MessageBlobOfferResponse to the submitter + // if appropriate. 
+ DeltaStopExpiredBlobFetches = 5 * time.Second ) +func transmitDataDuration(rateBytesPerSecond int, size uint64) time.Duration { + secs := float64(size) / float64(rateBytesPerSecond) + return time.Duration(secs * float64(time.Second)) +} + +func blobChunkRequestExpiration(chunkSize uint64) time.Duration { + return latencyOverhead + transmitDataDuration(rateBytesPerSecond, chunkSize) +} + +func blobOfferBroadcastExpiration(payloadLength uint64) time.Duration { + const latencyPerChunk = DeltaBlobChunkRequest + + expiration := latencyOverhead + for i := uint64(0); i < payloadLength; i += BlobChunkSize { + chunkSize := min(BlobChunkSize, payloadLength-i) + expiration += latencyPerChunk + expiration += transmitDataDuration(rateBytesPerSecond, chunkSize) + } + return expiration +} + type blobBroadcastRequest struct { payload []byte expirySeqNr uint64 + chResponse chan blobBroadcastResponse + chDone <-chan struct{} +} + +func (req *blobBroadcastRequest) respond(ctx context.Context, resp blobBroadcastResponse) { + select { + case req.chResponse <- resp: + case <-req.chDone: + case <-ctx.Done(): + } } type blobBroadcastResponse struct { - chCert chan LightCertifiedBlob - err error + cert LightCertifiedBlob + err error } type blobFetchRequest struct { - cert LightCertifiedBlob + cert LightCertifiedBlob + chResponse chan blobFetchResponse + chDone <-chan struct{} +} + +func (req *blobFetchRequest) respond(ctx context.Context, resp blobFetchResponse) { + select { + case req.chResponse <- resp: + case <-req.chDone: + case <-ctx.Done(): + } } type blobFetchResponse struct { - chPayload chan []byte - err error + payload []byte + err error } type blobExchangeState[RI any] struct { @@ -143,56 +372,160 @@ type blobExchangeState[RI any] struct { chNetToBlobExchange <-chan MessageToBlobExchangeWithSender[RI] chOutcomeGenerationToBlobExchange <-chan EventToBlobExchange[RI] - chBlobBroadcastRequest <-chan blobBroadcastRequest - chBlobBroadcastResponse chan<- blobBroadcastResponse - - 
chBlobFetchRequest <-chan blobFetchRequest - chBlobFetchResponse chan<- blobFetchResponse + chBlobBroadcastRequest <-chan blobBroadcastRequest + chBlobFetchRequest <-chan blobFetchRequest config ocr3config.SharedConfig - kv KeyValueStore + kv KeyValueDatabase id commontypes.OracleID + limits ocr3_1types.ReportingPluginLimits localConfig types.LocalConfig logger loghelper.LoggerWithContext netSender NetworkSender[RI] offchainKeyring types.OffchainKeyring telemetrySender TelemetrySender - missingChunkScheduler *scheduler.Scheduler[EventMissingBlobChunk[RI]] - missingCertScheduler *scheduler.Scheduler[EventMissingBlobCert[RI]] - blobs map[BlobDigest]*blob + // blob broadcast + broadcastGraceTimeoutScheduler *scheduler.Scheduler[EventBlobBroadcastGraceTimeout[RI]] + offerRequesterGadget *requestergadget.RequesterGadget[blobOfferItem] + + // blob fetch + chunkRequesterGadget *requestergadget.RequesterGadget[blobChunkId] + tStopExpiredBlobFetches <-chan time.Time + + blobs map[BlobDigest]*blob +} + +type blobOfferItem struct { + blobDigest BlobDigest + oracleID commontypes.OracleID +} + +type blobChunkId struct { + blobDigest BlobDigest + chunkIndex uint64 } const BlobChunkSize = 1 << 22 // 4MiB -type blob struct { - chNotifyCertAvailable chan struct{} - chNotifyPayloadAvailable chan struct{} +func numChunks(payloadLength uint64) uint64 { + return (payloadLength + BlobChunkSize - 1) / BlobChunkSize +} + +type blobFetchMeta struct { + chNotify chan struct{} + waiters int + exchange *blobExchangeMeta + seeders map[commontypes.OracleID]struct{} + expired bool +} + +func (bifm *blobFetchMeta) weServiced() { + bifm.waiters-- +} + +func (bifm *blobFetchMeta) prunable() bool { + if bifm == nil { + return true + } + return bifm.waiters <= 0 && bifm.exchange.prunable() +} - // certOrNil == nil indicates that we're the submitter and want to collect a - // cert. 
+type blobBroadcastPhase string + +const ( + blobBroadcastPhaseOffering blobBroadcastPhase = "offering" + blobBroadcastPhaseAcceptedGrace blobBroadcastPhase = "acceptedGrace" + blobBroadcastPhaseAccepted blobBroadcastPhase = "accepted" + blobBroadcastPhaseRejected blobBroadcastPhase = "rejected" +) + +type blobBroadcastMeta struct { + chNotify chan struct{} + waiters int + phase blobBroadcastPhase + // certOrNil == nil indicates we still have not assembled a cert. certOrNil *LightCertifiedBlob + oracles []blobBroadcastOracleMeta +} + +type blobBroadcastOracleMeta struct { + weReceivedOfferResponse bool + weReceivedOfferResponseAccepting bool + signature BlobAvailabilitySignature +} + +func (bibm *blobBroadcastMeta) shouldOffer() bool { + return bibm.phase == blobBroadcastPhaseOffering || bibm.phase == blobBroadcastPhaseAcceptedGrace +} + +func (bibm *blobBroadcastMeta) shouldOfferTo(oracleID commontypes.OracleID) bool { + return bibm.shouldOffer() && !bibm.oracles[oracleID].weReceivedOfferResponse +} + +func (bibm *blobBroadcastMeta) weServiced() { + bibm.waiters-- +} + +func (bibm *blobBroadcastMeta) prunable() bool { + if bibm == nil { + return true + } + return bibm.waiters <= 0 +} + +type blobExchangeMeta struct { + weSentOfferResponse bool + latestOfferRequestHandle types.RequestHandle +} + +func (biem *blobExchangeMeta) weServiced() { + biem.weSentOfferResponse = true +} + +func (biem *blobExchangeMeta) prunable() bool { + if biem == nil { + return true + } + return biem.weSentOfferResponse || biem.latestOfferRequestHandle == nil +} + +type blob struct { + timeWhenAdded time.Time + + broadcast *blobBroadcastMeta + fetch *blobFetchMeta - chunks []chunk - oracles []blobOracle + chunkDigests []BlobChunkDigest + chunkHaves []bool payloadLength uint64 expirySeqNr uint64 submitter commontypes.OracleID } -type blobOracle struct { - signature BlobAvailabilitySignature +func (b *blob) getBlobChunkSize(chunkIndex uint64) uint64 { + if chunkIndex == 
uint64(len(b.chunkDigests))-1 { + return b.payloadLength % BlobChunkSize + } + return BlobChunkSize +} + +func (b *blob) haveAllChunks() bool { + return !slices.Contains(b.chunkHaves, false) } -type chunk struct { - have bool - digest BlobChunkDigest +func (b *blob) prunable() bool { + return b.broadcast.prunable() && b.fetch.prunable() } func (bex *blobExchangeState[RI]) run() { bex.logger.Info("BlobExchange: running", nil) + bex.subs.Go(func() { + RunBlobReap(bex.ctx, bex.logger, bex.kv) + }) + // Take a reference to the ctx.Done channel once, here, to avoid taking the // context lock below. chDone := bex.ctx.Done() @@ -206,7 +539,6 @@ func (bex *blobExchangeState[RI]) run() { case msg := <-bex.chNetToBlobExchange: msg.msg.processBlobExchange(bex, msg.sender) case ev := <-bex.chOutcomeGenerationToBlobExchange: - ev.processBlobExchange(bex) case req := <-bex.chBlobBroadcastRequest: @@ -214,10 +546,15 @@ func (bex *blobExchangeState[RI]) run() { case req := <-bex.chBlobFetchRequest: bex.processBlobFetchRequest(req) - case ev := <-bex.missingCertScheduler.Scheduled(): - ev.processBlobExchange(bex) - case ev := <-bex.missingChunkScheduler.Scheduled(): + case ev := <-bex.broadcastGraceTimeoutScheduler.Scheduled(): ev.processBlobExchange(bex) + case <-bex.offerRequesterGadget.Ticker(): + bex.offerRequesterGadget.Tick() + + case <-bex.chunkRequesterGadget.Ticker(): + bex.chunkRequesterGadget.Tick() + case <-bex.tStopExpiredBlobFetches: + bex.eventTStopExpiredBlobFetches() case <-chDone: } @@ -235,54 +572,135 @@ func (bex *blobExchangeState[RI]) run() { } } -func (bex *blobExchangeState[RI]) messageBlobOffer(msg MessageBlobOffer[RI], sender commontypes.OracleID) { - if msg.PayloadLength == 0 { - bex.logger.Debug("dropping MessageBlobOffer with zero payload length", commontypes.LogFields{ - "sender": sender, +func (bex *blobExchangeState[RI]) eventTStopExpiredBlobFetches() { + defer func() { + bex.tStopExpiredBlobFetches = time.After(DeltaStopExpiredBlobFetches) + }() + + 
tx, err := bex.kv.NewReadTransactionUnchecked() + if err != nil { + bex.logger.Error("failed to create read transaction for eventTStopExpiredBlobFetches", commontypes.LogFields{ + "error": err, }) return } + defer tx.Discard() + + highestCommittedSeqNr, err := tx.ReadHighestCommittedSeqNr() + if err != nil { + bex.logger.Error("failed to read highest committed seq nr for eventTStopExpiredBlobFetches", commontypes.LogFields{ + "error": err, + }) + return + } + + for blobDigest, blob := range bex.blobs { + fetch := blob.fetch + if fetch == nil { + continue + } + if fetch.expired || blob.haveAllChunks() { + continue + } + + if !hasBlobExpired(blob.expirySeqNr, highestCommittedSeqNr) { + continue + } + + bex.logger.Debug("stopping expired blob fetch", commontypes.LogFields{ + "blobDigest": blobDigest, + "expirySeqNr": blob.expirySeqNr, + "highestCommittedSeqNr": highestCommittedSeqNr, + "submitter": blob.submitter, + }) + + if fetch.exchange != nil { + bex.sendBlobOfferResponseRejecting(blobDigest, blob.submitter, fetch.exchange.latestOfferRequestHandle) + fetch.exchange.weServiced() + } + + fetch.expired = true + close(fetch.chNotify) + + if blob.prunable() { + delete(bex.blobs, blobDigest) + } + } +} + +func (bex *blobExchangeState[RI]) messageBlobOffer(msg MessageBlobOffer[RI], sender commontypes.OracleID) { + submitter := sender blobDigest := blobtypes.MakeBlobDigest( bex.config.ConfigDigest, msg.ChunkDigests, msg.PayloadLength, msg.ExpirySeqNr, - msg.Submitter, + submitter, ) - // check if we maybe already have this blob in full - { - payload, err := bex.readBlobPayload(blobDigest) - if err != nil { - bex.logger.Warn("dropping MessageBlobOffer, failed to check if we already have the payload", commontypes.LogFields{ - "blobDigest": blobDigest, - "sender": sender, - }) - return - } - if payload != nil { - bex.logger.Debug("received MessageBlobOffer for which we already have the payload", commontypes.LogFields{ - "blobDigest": blobDigest, - "sender": sender, - }) + 
chunkHaves, err := bex.loadChunkHaves(blobDigest, msg.PayloadLength) + if err != nil { + bex.logger.Warn("dropping MessageBlobOffer, failed to check if we already have the payload", commontypes.LogFields{ + "blobDigest": blobDigest, + "sender": sender, + }) + return + } - bex.sendAvailabilitySignature(blobDigest, msg.Submitter) - return - } + // check if we maybe already have this blob in full + if !slices.Contains(chunkHaves, false) { + bex.logger.Debug("received MessageBlobOffer for which we already have the payload", commontypes.LogFields{ + "blobDigest": blobDigest, + "sender": sender, + }) + bex.sendBlobOfferResponseAccepting(blobDigest, submitter, msg.RequestHandle) + return } - if _, ok := bex.blobs[blobDigest]; ok { - bex.logger.Debug("dropping duplicate MessageBlobOffer", commontypes.LogFields{ + if blob, ok := bex.blobs[blobDigest]; ok { + bex.logger.Debug("duplicate MessageBlobOffer, updating offer request handle", commontypes.LogFields{ "blobDigest": blobDigest, "sender": sender, }) + if blob.fetch != nil && blob.fetch.exchange != nil { + blob.fetch.exchange.latestOfferRequestHandle = msg.RequestHandle + } + return + } + + // Reject if payload length exceeds maximum allowed length + if msg.PayloadLength > uint64(bex.limits.MaxBlobPayloadLength) { + bex.logger.Debug("received MessageBlobOffer with payload length that exceeds maximum allowed length, rejecting", commontypes.LogFields{ + "blobDigest": blobDigest, + "submitter": submitter, + "payloadLength": msg.PayloadLength, + "maxPayloadLength": bex.limits.MaxBlobPayloadLength, + }) + bex.sendBlobOfferResponseRejecting(blobDigest, submitter, msg.RequestHandle) + return + } + + // Reject if blob has already expired + committedSeqNr, err := bex.kv.HighestCommittedSeqNr() + if err != nil { + bex.logger.Error("failed to read highest committed seq nr for MessageBlobOffer", commontypes.LogFields{ + "error": err, + }) + return + } + if hasBlobExpired(msg.ExpirySeqNr, committedSeqNr) { + 
bex.logger.Debug("received MessageBlobOffer for already expired blob, rejecting", commontypes.LogFields{ + "blobDigest": blobDigest, + "submitter": submitter, + "expirySeqNr": msg.ExpirySeqNr, + "committedSeqNr": committedSeqNr, + }) + bex.sendBlobOfferResponseRejecting(blobDigest, submitter, msg.RequestHandle) return } // TODO: enforce rate limit based on sender / length - // TODO: check payload length against Max - // TODO: check Max against MaxMax (in plugin config) bex.logger.Debug("received MessageBlobOffer", commontypes.LogFields{ "blobDigest": blobDigest, @@ -292,110 +710,334 @@ func (bex *blobExchangeState[RI]) messageBlobOffer(msg MessageBlobOffer[RI], sen "expirySeqNr": msg.ExpirySeqNr, }) - chunks := make([]chunk, len(msg.ChunkDigests)) - for i, chunkDigest := range msg.ChunkDigests { - chunks[i] = chunk{ - false, - chunkDigest, - } + seeders := map[commontypes.OracleID]struct{}{ + submitter: {}, } bex.blobs[blobDigest] = &blob{ - make(chan struct{}), - make(chan struct{}), - + time.Now(), nil, + &blobFetchMeta{ + make(chan struct{}), + 0, + &blobExchangeMeta{ + false, + msg.RequestHandle, + }, + seeders, + false, + }, - chunks, - make([]blobOracle, bex.config.N()), + msg.ChunkDigests, + chunkHaves, msg.PayloadLength, msg.ExpirySeqNr, - msg.Submitter, + submitter, } - bex.missingChunkScheduler.ScheduleDelay(EventMissingBlobChunk[RI]{ - blobDigest, - }, 0) + bex.chunkRequesterGadget.PleaseRecheckPendingItems() } -func (bex *blobExchangeState[RI]) readBlobPayload(blobDigest BlobDigest) ([]byte, error) { - tx, err := bex.kv.NewReadTransactionUnchecked() - if err != nil { - return nil, fmt.Errorf("failed to create read transaction") +func (bex *blobExchangeState[RI]) messageBlobOfferResponse(msg MessageBlobOfferResponse[RI], sender commontypes.OracleID) { + item := blobOfferItem{msg.BlobDigest, sender} + if !bex.offerRequesterGadget.CheckAndMarkResponse(item, sender) { + bex.logger.Debug("dropping MessageBlobOfferResponse, not allowed", 
commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + }) + return } - defer tx.Discard() - return tx.ReadBlob(blobDigest) -} -func (bex *blobExchangeState[RI]) messageBlobChunkRequest(msg MessageBlobChunkRequest[RI], sender commontypes.OracleID) { blob, ok := bex.blobs[msg.BlobDigest] if !ok { - bex.logger.Debug("dropping MessageBlobChunkRequest for unknown blob", commontypes.LogFields{ + bex.logger.Debug("dropping MessageBlobOfferResponse for unknown blob", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, }) return } - chunkIndex := msg.ChunkIndex - - bex.logger.Debug("received MessageBlobChunkRequest", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - "chunkIndex": chunkIndex, - "payloadLength": blob.payloadLength, - }) - - tx, err := bex.kv.NewReadTransactionUnchecked() - defer tx.Discard() - if err != nil { - bex.logger.Error("failed to create read transaction for MessageBlobChunkRequest", commontypes.LogFields{ - "error": err, + if blob.broadcast == nil { + bex.logger.Debug("dropping MessageBlobOfferResponse, not broadcasting", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, }) return } + broadcast := blob.broadcast - chunk, err := tx.ReadBlobChunk(msg.BlobDigest, chunkIndex) - if err != nil { - bex.logger.Error("failed to read blob chunk for MessageBlobChunkRequest", commontypes.LogFields{ + if blob.submitter != bex.id { + bex.logger.Debug("dropping MessageBlobOfferResponse, not the submitter", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, - "chunkIndex": chunkIndex, - "error": err, + "submitter": blob.submitter, + "localID": bex.id, }) return } - if chunk == nil { - bex.logger.Debug("dropping MessageBlobChunkRequest, do not have chunk", commontypes.LogFields{ + + if !(broadcast.phase == blobBroadcastPhaseOffering || broadcast.phase == blobBroadcastPhaseAcceptedGrace) { + bex.logger.Debug("dropping MessageBlobOfferResponse, not in 
offering or accepted grace phase", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, - "chunkIndex": chunkIndex, + "phase": broadcast.phase, }) return } - chunkDigest := blobtypes.MakeBlobChunkDigest(chunk) - expectedChunkDigest := blob.chunks[chunkIndex].digest - if chunkDigest != expectedChunkDigest { - bex.logger.Critical("assumption violation: chunk digest mismatch while preparing MessageBlobChunkResponse", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "expectedChunkDigest": expectedChunkDigest, - "actualChunkDigest": chunkDigest, + // check if we already have an offer response from this oracle + if broadcast.oracles[sender].weReceivedOfferResponse { + bex.logger.Debug("dropping MessageBlobOfferResponse, already have message from oracle", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, }) return } - bex.logger.Debug("sending MessageBlobChunkResponse", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "chunkIndex": chunkIndex, - "to": sender, - }) - - bex.netSender.SendTo( - MessageBlobChunkResponse[RI]{ + // did they accept our offer? 
+ if !msg.RejectOffer { + // check signature + if err := msg.Signature.Verify(msg.BlobDigest, bex.config.OracleIdentities[sender].OffchainPublicKey); err != nil { + bex.logger.Debug("dropping MessageBlobOfferResponse, invalid signature", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + }) + return + } + + // save signature for oracle + broadcast.oracles[sender] = blobBroadcastOracleMeta{ + true, + true, + msg.Signature, + } + } else { + // save rejection for oracle + broadcast.oracles[sender] = blobBroadcastOracleMeta{ + true, + false, + nil, + } + } + + threshold := bex.minCertSigners() + + acceptingOracles, rejectingOracles := 0, 0 + for _, oracle := range broadcast.oracles { + if oracle.weReceivedOfferResponse { + if oracle.weReceivedOfferResponseAccepting { + acceptingOracles++ + } else { + rejectingOracles++ + } + } + } + + bex.logger.Debug("received MessageBlobOfferResponse", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + "reject": msg.RejectOffer, + "acceptingOracles": acceptingOracles, + "rejectingOracles": rejectingOracles, + "threshold": threshold, + }) + + if broadcast.phase == blobBroadcastPhaseAcceptedGrace { + return + } + + if acceptingOracles >= threshold { + bex.logger.Debug("minimum number of accepting oracles reached, entering grace period", commontypes.LogFields{ + "acceptingOracles": acceptingOracles, + "threshold": threshold, + "blobDigest": msg.BlobDigest, + "gracePeriod": DeltaBlobBroadcastGrace, + }) + broadcast.phase = blobBroadcastPhaseAcceptedGrace + bex.broadcastGraceTimeoutScheduler.ScheduleDelay(EventBlobBroadcastGraceTimeout[RI]{ + msg.BlobDigest, + }, DeltaBlobBroadcastGrace) + return + } + + if rejectingOracles >= threshold { + bex.logger.Warn("oracle quorum rejected our broadcast", commontypes.LogFields{ + "rejectingOracles": rejectingOracles, + "blobDigest": msg.BlobDigest, + }) + broadcast.phase = blobBroadcastPhaseRejected + close(broadcast.chNotify) + + return + } +} + 
+func (bex *blobExchangeState[RI]) eventBlobBroadcastGraceTimeout(ev EventBlobBroadcastGraceTimeout[RI]) { + blob, ok := bex.blobs[ev.BlobDigest] + if !ok { + bex.logger.Debug("dropping EventBlobBroadcastGraceTimeout for unknown blob", commontypes.LogFields{ + "blobDigest": ev.BlobDigest, + }) + return + } + broadcast := blob.broadcast + if broadcast == nil { + bex.logger.Debug("dropping EventBlobBroadcastGraceTimeout for blob with no broadcast", commontypes.LogFields{ + "blobDigest": ev.BlobDigest, + }) + return + } + if broadcast.phase != blobBroadcastPhaseAcceptedGrace { + bex.logger.Debug("dropping EventBlobBroadcastGraceTimeout for blob not in accepted grace phase", commontypes.LogFields{ + "blobDigest": ev.BlobDigest, + "phase": broadcast.phase, + }) + return + } + + maxSigners := bex.maxCertSigners() + + shuffledOracles := make([]commontypes.OracleID, 0, bex.config.N()) + for i := range bex.config.N() { + shuffledOracles = append(shuffledOracles, commontypes.OracleID(i)) + } + + rand.Shuffle(len(shuffledOracles), func(i, j int) { + shuffledOracles[i], shuffledOracles[j] = shuffledOracles[j], shuffledOracles[i] + }) + + var abass []AttributedBlobAvailabilitySignature + for _, oracleID := range shuffledOracles { + oracle := broadcast.oracles[oracleID] + if oracle.weReceivedOfferResponse && oracle.weReceivedOfferResponseAccepting && len(abass) < maxSigners { + abass = append(abass, AttributedBlobAvailabilitySignature{ + oracle.signature, + oracleID, + }) + } + } + + lcb := LightCertifiedBlob{ + blob.chunkDigests, + blob.payloadLength, + blob.expirySeqNr, + blob.submitter, + abass, + } + + if err := bex.verifyCert(&lcb); err != nil { + bex.logger.Critical("assumption violation: failed to verify own LightCertifiedBlob", commontypes.LogFields{ + "blobDigest": ev.BlobDigest, + "error": err, + }) + return + } + + bex.logger.Debug("assembled blob availability certificate", commontypes.LogFields{ + "acceptingOracles": len(abass), + "blobDigest": ev.BlobDigest, + }) + 
+ broadcast.certOrNil = &lcb + broadcast.phase = blobBroadcastPhaseAccepted + close(broadcast.chNotify) +} + +func (bex *blobExchangeState[RI]) sendBlobOfferResponseAccepting(blobDigest BlobDigest, submitter commontypes.OracleID, requestHandle types.RequestHandle) { + + bas, err := blobtypes.MakeBlobAvailabilitySignature(blobDigest, bex.offchainKeyring.OffchainSign) + if err != nil { + bex.logger.Error("failed to make blob availability signature", commontypes.LogFields{ + "blobDigest": blobDigest, + "error": err, + }) + return + } + + bex.logger.Debug("sending accepting MessageBlobOfferResponse", commontypes.LogFields{ + "blobDigest": blobDigest, + "submitter": submitter, + }) + bex.netSender.SendTo( + MessageBlobOfferResponse[RI]{ + requestHandle, + blobDigest, + false, + bas, + }, + submitter, + ) +} +func (bex *blobExchangeState[RI]) sendBlobOfferResponseRejecting(blobDigest BlobDigest, submitter commontypes.OracleID, requestHandle types.RequestHandle) { + bex.netSender.SendTo( + MessageBlobOfferResponse[RI]{ + requestHandle, + blobDigest, + true, + nil, + }, + submitter, + ) +} + +func (bex *blobExchangeState[RI]) readBlobPayload(blobDigest BlobDigest) ([]byte, error) { + tx, err := bex.kv.NewReadTransactionUnchecked() + if err != nil { + return nil, fmt.Errorf("failed to create read transaction: %w", err) + } + defer tx.Discard() + + payload, err := tx.ReadBlobPayload(blobDigest) + if err != nil { + return nil, fmt.Errorf("failed to read blob payload: %w", err) + } + return payload, nil +} + +func (bex *blobExchangeState[RI]) messageBlobChunkRequest(msg MessageBlobChunkRequest[RI], sender commontypes.OracleID) { + chunkIndex := msg.ChunkIndex + + bex.logger.Trace("received MessageBlobChunkRequest", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + "chunkIndex": chunkIndex, + }) + + tx, err := bex.kv.NewReadTransactionUnchecked() + defer tx.Discard() + if err != nil { + bex.logger.Error("failed to create read transaction for 
MessageBlobChunkRequest", commontypes.LogFields{ + "error": err, + }) + return + } + + chunk, err := tx.ReadBlobChunk(msg.BlobDigest, chunkIndex) + if err != nil { + bex.logger.Error("failed to read blob chunk for MessageBlobChunkRequest", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + "chunkIndex": chunkIndex, + "error": err, + }) + return + } + + goAway := chunk == nil + + bex.logger.Debug("sending MessageBlobChunkResponse", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "chunkIndex": chunkIndex, + "goAway": goAway, + "to": sender, + }) + + bex.netSender.SendTo( + MessageBlobChunkResponse[RI]{ msg.RequestHandle, msg.BlobDigest, chunkIndex, + goAway, chunk, }, sender, @@ -403,37 +1045,74 @@ func (bex *blobExchangeState[RI]) messageBlobChunkRequest(msg MessageBlobChunkRe } func (bex *blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkResponse[RI], sender commontypes.OracleID) { + bcid := blobChunkId{msg.BlobDigest, msg.ChunkIndex} + if !bex.chunkRequesterGadget.CheckAndMarkResponse(bcid, sender) { + bex.logger.Debug("dropping MessageBlobChunkResponse, not allowed", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + }) + return + } + + if msg.GoAway { + bex.logger.Debug("dropping MessageBlobChunkResponse, go away", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + }) + bex.chunkRequesterGadget.MarkGoAwayResponse(bcid, sender) + return + } + blob, ok := bex.blobs[msg.BlobDigest] if !ok { bex.logger.Debug("dropping MessageBlobChunkResponse for unknown blob", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, }) + bex.chunkRequesterGadget.MarkBadResponse(bcid, sender) + return + } + + fetch := blob.fetch + if fetch == nil { + bex.logger.Debug("dropping MessageBlobChunkResponse for blob with no fetch", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + }) + return + } + if fetch.expired { + bex.logger.Debug("dropping 
MessageBlobChunkResponse for expired blob", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "sender": sender, + }) return } chunkIndex := msg.ChunkIndex - if !(0 <= chunkIndex && chunkIndex < uint64(len(blob.chunks))) { + if !(0 <= chunkIndex && chunkIndex < uint64(len(blob.chunkDigests))) { bex.logger.Warn("dropping MessageBlobChunkResponse, chunk index out of range", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, "chunkIndex": chunkIndex, - "chunkCount": len(blob.chunks), + "chunkCount": len(blob.chunkDigests), }) + bex.chunkRequesterGadget.MarkBadResponse(bcid, sender) return } - if blob.chunks[chunkIndex].have { + if blob.chunkHaves[chunkIndex] { bex.logger.Debug("dropping MessageBlobChunkResponse, already have chunk", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, "chunkIndex": chunkIndex, }) + bex.chunkRequesterGadget.MarkBadResponse(bcid, sender) return } - expectedChunkDigest := blob.chunks[chunkIndex].digest + expectedChunkDigest := blob.chunkDigests[chunkIndex] actualChunkDigest := blobtypes.MakeBlobChunkDigest(msg.Chunk) if expectedChunkDigest != actualChunkDigest { bex.logger.Debug("dropping MessageBlobChunkResponse, chunk digest mismatch", commontypes.LogFields{ @@ -446,6 +1125,8 @@ func (bex *blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkR return } + bex.chunkRequesterGadget.MarkGoodResponse(bcid, sender) + bex.logger.Debug("received MessageBlobChunkResponse", commontypes.LogFields{ "blobDigest": msg.BlobDigest, "sender": sender, @@ -453,7 +1134,7 @@ func (bex *blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkR "payloadLength": blob.payloadLength, }) - tx, err := bex.kv.NewReadWriteTransactionUnchecked() + tx, err := bex.kv.NewUnserializedReadWriteTransactionUnchecked() if err != nil { bex.logger.Error("failed to create read-write transaction for MessageBlobChunkResponse", commontypes.LogFields{ "error": err, @@ -473,7 +1154,14 @@ func (bex 
*blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkR return } - err = tx.WriteBlobMeta(msg.BlobDigest, blob.payloadLength) + chunkHaves := slices.Clone(blob.chunkHaves) + chunkHaves[chunkIndex] = true + blobMeta := BlobMeta{ + blob.payloadLength, + chunkHaves, + blob.expirySeqNr, + } + err = tx.WriteBlobMeta(msg.BlobDigest, blobMeta) if err != nil { bex.logger.Error("failed to write blob meta for MessageBlobChunkResponse", commontypes.LogFields{ "blobDigest": msg.BlobDigest, @@ -484,6 +1172,15 @@ func (bex *blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkR return } + err = tx.WriteStaleBlobIndex(staleBlob(blob.expirySeqNr, msg.BlobDigest)) + if err != nil { + bex.logger.Error("failed to write stale blob index for MessageBlobChunkResponse", commontypes.LogFields{ + "blobDigest": msg.BlobDigest, + "error": err, + }) + return + } + err = tx.Commit() if err != nil { bex.logger.Error("failed to commit transaction for MessageBlobChunkResponse", commontypes.LogFields{ @@ -495,12 +1192,10 @@ func (bex *blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkR return } - blob.chunks[chunkIndex].have = true + blob.chunkHaves[chunkIndex] = true - for _, chunk := range blob.chunks { - if !chunk.have { - return - } + if !blob.haveAllChunks() { + return } bex.logger.Debug("blob fully received", commontypes.LogFields{ @@ -508,217 +1203,32 @@ func (bex *blobExchangeState[RI]) messageBlobChunkResponse(msg MessageBlobChunkR "sender": sender, "payloadLength": blob.payloadLength, }) - close(blob.chNotifyPayloadAvailable) - bex.sendAvailabilitySignature(msg.BlobDigest, blob.submitter) -} - -func (bex *blobExchangeState[RI]) messageBlobAvailable(msg MessageBlobAvailable[RI], sender commontypes.OracleID) { - blob, ok := bex.blobs[msg.BlobDigest] - if !ok { - bex.logger.Debug("dropping MessageBlobAvailable for unknown blob", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - }) - return - } - - if blob.submitter 
!= bex.id { - bex.logger.Debug("dropping MessageBlobAvailable, not the submitter", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - "submitter": blob.submitter, - "localID": bex.id, - }) - return - } - - // check if we already have signature from oracle - if len(blob.oracles[sender].signature) != 0 { - bex.logger.Debug("dropping MessageBlobAvailable, already have signature from oracle", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - }) - return - } - - // check if we already have a certificate - if blob.certOrNil != nil { - bex.logger.Debug("dropping MessageBlobAvailable, already have certificate", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - }) - return - } - - // check signature - if err := msg.Signature.Verify(msg.BlobDigest, bex.config.OracleIdentities[sender].OffchainPublicKey); err != nil { - bex.logger.Debug("dropping MessageBlobAvailable, invalid signature", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - }) - return - } - - // save signature for oracle - blob.oracles[sender].signature = msg.Signature - - // when we obtain certificate, notify upcall - threshold := bex.config.N() - bex.config.F - signers := 0 - for _, oracle := range blob.oracles { - if len(oracle.signature) != 0 { - signers++ - } - } - - bex.logger.Debug("received MessageBlobAvailable", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "sender": sender, - "signers": signers, - "threshold": threshold, - }) - - if signers < threshold { - return - } - - { - var abass []AttributedBlobAvailabilitySignature - for i, oracle := range blob.oracles { - if len(oracle.signature) != 0 { - abass = append(abass, AttributedBlobAvailabilitySignature{ - oracle.signature, - commontypes.OracleID(i), - }) - } - } - - var chunkDigests []BlobChunkDigest - for _, chunk := range blob.chunks { - chunkDigests = append(chunkDigests, chunk.digest) - } - - lcb := LightCertifiedBlob{ - 
chunkDigests, - blob.payloadLength, - blob.expirySeqNr, - blob.submitter, - abass, - } - - if err := lcb.Verify(bex.config.ConfigDigest, bex.config.OracleIdentities, threshold); err != nil { - bex.logger.Critical("assumption violation: failed to verify own LightCertifiedBlob", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - "error": err, - }) - return - } - - bex.logger.Debug("assembled blob availability certificate", commontypes.LogFields{ - "blobDigest": msg.BlobDigest, - }) - - blob.certOrNil = &lcb - close(blob.chNotifyCertAvailable) - } -} - -func (bex *blobExchangeState[RI]) eventMissingChunk(ev EventMissingBlobChunk[RI]) { - blob, ok := bex.blobs[ev.BlobDigest] - if !ok { - bex.logger.Debug("dropping EventMissingBlobChunk, unknown blob", commontypes.LogFields{ - "blobDigest": ev.BlobDigest, - }) - return - } - - for i, chunk := range blob.chunks { - if !chunk.have { - chunkIndex := uint64(i) - - bex.logger.Debug("broadcasting MessageBlobChunkRequest", commontypes.LogFields{ - "blobDigest": ev.BlobDigest, - "chunkIndex": chunkIndex, - "payloadLength": blob.payloadLength, - }) - bex.netSender.Broadcast( - MessageBlobChunkRequest[RI]{ - nil, - ev.BlobDigest, - chunkIndex, - }, - ) - - bex.missingChunkScheduler.ScheduleDelay(EventMissingBlobChunk[RI]{ - ev.BlobDigest, - }, DeltaBlobChunkRequest) - return - } - } -} - -func (bex *blobExchangeState[RI]) eventMissingCert(ev EventMissingBlobCert[RI]) { - blob, ok := bex.blobs[ev.BlobDigest] - if !ok { - bex.logger.Debug("dropping EventMissingBlobCert, unknown blob", commontypes.LogFields{ - "blobDigest": ev.BlobDigest, - }) - return - } - - if blob.certOrNil != nil { - return + close(fetch.chNotify) + if fetch.exchange != nil { + bex.sendBlobOfferResponseAccepting(msg.BlobDigest, blob.submitter, fetch.exchange.latestOfferRequestHandle) + fetch.exchange.weServiced() } - - var chunkDigests []BlobChunkDigest - for _, chnk := range blob.chunks { - chunkDigests = append(chunkDigests, chnk.digest) + if 
blob.prunable() { + delete(bex.blobs, msg.BlobDigest) } - - bex.netSender.Broadcast(MessageBlobOffer[RI]{ - chunkDigests, - blob.payloadLength, - blob.expirySeqNr, - blob.submitter, - }) - - bex.missingCertScheduler.ScheduleDelay(EventMissingBlobCert[RI]{ - ev.BlobDigest, - }, DeltaBlobCertRequest) } -func (bex *blobExchangeState[RI]) sendAvailabilitySignature(blobDigest BlobDigest, submitter commontypes.OracleID) { - - // delete(bex.blobs, blobDigest) - - bas, err := blobtypes.MakeBlobAvailabilitySignature(blobDigest, bex.offchainKeyring.OffchainSign) - if err != nil { - bex.logger.Error("failed to make blob availability signature", commontypes.LogFields{ - "blobDigest": blobDigest, - "error": err, +func (bex *blobExchangeState[RI]) processBlobBroadcastRequest(req blobBroadcastRequest) { + if len(req.payload) > bex.limits.MaxBlobPayloadLength { + req.respond(bex.ctx, blobBroadcastResponse{ + LightCertifiedBlob{}, + fmt.Errorf("blob payload length %d exceeds maximum allowed length %d", + len(req.payload), bex.limits.MaxBlobPayloadLength), }) return } - bex.logger.Debug("sending MessageBlobAvailable", commontypes.LogFields{ - "blobDigest": blobDigest, - "submitter": submitter, - }) - bex.netSender.SendTo( - MessageBlobAvailable[RI]{ - blobDigest, - bas, - }, - submitter, - ) -} - -func (bex *blobExchangeState[RI]) processBlobBroadcastRequest(req blobBroadcastRequest) { - var chunkDigests []BlobChunkDigest - var chunks []chunk payload := req.payload payloadLength := uint64(len(payload)) + chunkDigests := make([]BlobChunkDigest, 0, numChunks(payloadLength)) + chunkHaves := make([]bool, 0, numChunks(payloadLength)) + for i, chunkIdx := 0, 0; i < len(payload); i, chunkIdx = i+BlobChunkSize, chunkIdx+1 { payloadChunk := payload[i:min(i+BlobChunkSize, len(payload))] @@ -727,8 +1237,7 @@ func (bex *blobExchangeState[RI]) processBlobBroadcastRequest(req blobBroadcastR chunkDigests = append(chunkDigests, chunkDigest) // for local accounting - chunk := chunk{true, 
chunkDigest} - chunks = append(chunks, chunk) + chunkHaves = append(chunkHaves, true) } expirySeqNr := req.expirySeqNr @@ -742,85 +1251,132 @@ func (bex *blobExchangeState[RI]) processBlobBroadcastRequest(req blobBroadcastR submitter, ) + bex.logger.Debug("processing BlobBroadcastRequest", commontypes.LogFields{"blobDigest": blobDigest}) + var chNotifyCertAvailable chan struct{} if existingBlob, ok := bex.blobs[blobDigest]; ok { - chNotifyCertAvailable = existingBlob.chNotifyCertAvailable + if existingBlob.broadcast == nil { + existingBlob.broadcast = &blobBroadcastMeta{ + make(chan struct{}), + 1, + blobBroadcastPhaseOffering, + nil, + make([]blobBroadcastOracleMeta, bex.config.N()), + } + } + chNotifyCertAvailable = existingBlob.broadcast.chNotify } else { // if we haven't written the chunks to kv, we can't serve requests - if err := bex.writeBlob(blobDigest, payloadLength, payload); err != nil { - bex.chBlobBroadcastResponse <- blobBroadcastResponse{ - nil, + + if err := bex.writeBlob(blobDigest, payloadLength, payload, expirySeqNr); err != nil { + req.respond(bex.ctx, blobBroadcastResponse{ + LightCertifiedBlob{}, fmt.Errorf("failed to write blob: %w", err), - } + }) return } // write in-memory state chNotifyCertAvailable = make(chan struct{}) - chNotifyPayloadAvailable := make(chan struct{}) - close(chNotifyPayloadAvailable) - bex.blobs[blobDigest] = &blob{ - chNotifyCertAvailable, - chNotifyPayloadAvailable, - + time.Now(), + &blobBroadcastMeta{ + chNotifyCertAvailable, + 1, + blobBroadcastPhaseOffering, + nil, + make([]blobBroadcastOracleMeta, bex.config.N()), + }, nil, - chunks, - make([]blobOracle, bex.config.N()), + chunkDigests, + chunkHaves, payloadLength, expirySeqNr, submitter, } - bex.missingCertScheduler.ScheduleDelay(EventMissingBlobCert[RI]{ - blobDigest, - }, 0) + bex.offerRequesterGadget.PleaseRecheckPendingItems() } - chCert := make(chan LightCertifiedBlob) - bex.chBlobBroadcastResponse <- blobBroadcastResponse{chCert, nil} + chDone := 
bex.ctx.Done() bex.subs.Go(func() { - chDone := bex.ctx.Done() + select { + case <-req.chDone: + case <-chDone: + return + } + select { + case bex.chLocalEvent <- EventBlobBroadcastRequestDone[RI]{blobDigest}: + case <-chDone: + } + }) + + bex.subs.Go(func() { select { case <-chNotifyCertAvailable: - select { - case bex.chLocalEvent <- EventRespondWithBlobCert[RI]{blobDigest, chCert}: - case <-chDone: - } + case <-chDone: + return + case <-req.chDone: + return + } + + select { + case bex.chLocalEvent <- EventBlobBroadcastRequestRespond[RI]{blobDigest, req}: + case <-req.chDone: case <-chDone: } }) } -func (bex *blobExchangeState[RI]) eventRespondWithBlobCert(ev EventRespondWithBlobCert[RI]) { - blob, ok := bex.blobs[ev.BlobDigest] +func (bex *blobExchangeState[RI]) getCert(blobDigest BlobDigest) (LightCertifiedBlob, error) { + blob, ok := bex.blobs[blobDigest] if !ok { - bex.logger.Warn("dropping EventRespondWithBlobCert, no such blob", commontypes.LogFields{ - "blobDigest": ev.BlobDigest, - }) - return + return LightCertifiedBlob{}, fmt.Errorf("no such blob, unexpected") + } + if blob.broadcast == nil { + return LightCertifiedBlob{}, fmt.Errorf("no broadcast metadata available, unexpected") } + switch blob.broadcast.phase { + case blobBroadcastPhaseOffering: + return LightCertifiedBlob{}, fmt.Errorf("blob still being offered, unexpected") + case blobBroadcastPhaseAcceptedGrace: + return LightCertifiedBlob{}, fmt.Errorf("blob still in grace period, unexpected") + case blobBroadcastPhaseRejected: + return LightCertifiedBlob{}, fmt.Errorf("blob broadcast rejected by quorum") + case blobBroadcastPhaseAccepted: + if blob.broadcast.certOrNil == nil { + return LightCertifiedBlob{}, fmt.Errorf("blob was accepted but cert is nil, unexpected") + } + return *blob.broadcast.certOrNil, nil + } + panic("unreachable") +} - if blob.certOrNil == nil { - close(ev.Channel) - bex.logger.Critical("assumption violation: dropping EventRespondWithBlobCert, no cert available", 
commontypes.LogFields{ - "blobDigest": ev.BlobDigest, - }) +func (bex *blobExchangeState[RI]) eventBlobBroadcastRequestRespond(ev EventBlobBroadcastRequestRespond[RI]) { + cert, err := bex.getCert(ev.BlobDigest) + ev.Request.respond(bex.ctx, blobBroadcastResponse{cert, err}) +} + +func (bex *blobExchangeState[RI]) eventBlobBroadcastRequestDone(ev EventBlobBroadcastRequestDone[RI]) { + blob, ok := bex.blobs[ev.BlobDigest] + if !ok { return } - - select { - case ev.Channel <- *blob.certOrNil: - case <-bex.ctx.Done(): + broadcast := blob.broadcast + if broadcast != nil { + broadcast.weServiced() + } + if blob.prunable() { + delete(bex.blobs, ev.BlobDigest) } } -func (bex *blobExchangeState[RI]) writeBlob(blobDigest BlobDigest, payloadLength uint64, payload []byte) error { - tx, err := bex.kv.NewReadWriteTransactionUnchecked() +func (bex *blobExchangeState[RI]) writeBlob(blobDigest BlobDigest, payloadLength uint64, payload []byte, expirySeqNr uint64) error { + tx, err := bex.kv.NewUnserializedReadWriteTransactionUnchecked() if err != nil { return fmt.Errorf("failed to create read/write transaction: %w", err) } @@ -832,9 +1388,21 @@ func (bex *blobExchangeState[RI]) writeBlob(blobDigest BlobDigest, payloadLength } } - if err := tx.WriteBlobMeta(blobDigest, payloadLength); err != nil { + chunksHave := make([]bool, numChunks(payloadLength)) + for i := range chunksHave { + chunksHave[i] = true // mark all chunks as present since we're writing the full blob + } + blobMeta := BlobMeta{ + payloadLength, + chunksHave, + expirySeqNr, + } + if err := tx.WriteBlobMeta(blobDigest, blobMeta); err != nil { return fmt.Errorf("failed to write local blob meta: %w", err) } + if err := tx.WriteStaleBlobIndex(staleBlob(expirySeqNr, blobDigest)); err != nil { + return fmt.Errorf("failed to write stale blob index: %w", err) + } if err := tx.Commit(); err != nil { return fmt.Errorf("failed to commit kv transaction: %w", err) } @@ -842,15 +1410,13 @@ func (bex *blobExchangeState[RI]) 
writeBlob(blobDigest BlobDigest, payloadLength } func (bex *blobExchangeState[RI]) processBlobFetchRequest(req blobFetchRequest) { - threshold := bex.config.N() - bex.config.F + chDone := bex.ctx.Done() + cert := req.cert - err := cert.Verify( - bex.config.ConfigDigest, - bex.config.OracleIdentities, - threshold, - ) + err := bex.verifyCert(&cert) if err != nil { - bex.chBlobFetchResponse <- blobFetchResponse{nil, fmt.Errorf("invalid cert")} + req.respond(bex.ctx, blobFetchResponse{nil, fmt.Errorf("invalid cert")}) + return } blobDigest := blobtypes.MakeBlobDigest( @@ -861,65 +1427,175 @@ func (bex *blobExchangeState[RI]) processBlobFetchRequest(req blobFetchRequest) cert.Submitter, ) + bex.logger.Debug("processing BlobFetchRequest", commontypes.LogFields{"blobDigest": blobDigest}) + + seeders := make(map[commontypes.OracleID]struct{}, len(cert.AttributedBlobAvailabilitySignatures)) + for _, abs := range cert.AttributedBlobAvailabilitySignatures { + seeders[abs.Signer] = struct{}{} + } + var chNotifyPayloadAvailable chan struct{} + if existingBlob, ok := bex.blobs[blobDigest]; ok { - chNotifyPayloadAvailable = existingBlob.chNotifyPayloadAvailable + if existingBlob.fetch == nil { + chNotifyPayloadAvailable = make(chan struct{}) + + existingBlob.fetch = &blobFetchMeta{ + chNotifyPayloadAvailable, + 1, + nil, + seeders, + false, + } + if existingBlob.haveAllChunks() { + close(chNotifyPayloadAvailable) + } + } else { + for seeder := range seeders { + existingBlob.fetch.seeders[seeder] = struct{}{} // broaden seeders per cert + } + + existingBlob.fetch.waiters++ + + chNotifyPayloadAvailable = existingBlob.fetch.chNotify + } + + if !existingBlob.haveAllChunks() { + bex.chunkRequesterGadget.PleaseRecheckPendingItems() + } } else { chNotifyPayloadAvailable = make(chan struct{}) - chNotifyCertAvailable := make(chan struct{}) - var chunks []chunk - for _, chunkDigest := range cert.ChunkDigests { - chunks = append(chunks, chunk{false, chunkDigest}) + chunkHaves, err := 
bex.loadChunkHaves(blobDigest, cert.PayloadLength) + if err != nil { + req.respond(bex.ctx, blobFetchResponse{nil, fmt.Errorf("failed to import blob chunk haves from disk: %w", err)}) + return } - bex.blobs[blobDigest] = &blob{ - chNotifyCertAvailable, - chNotifyPayloadAvailable, - - &req.cert, + newBlob := &blob{ + time.Now(), + nil, + &blobFetchMeta{ + chNotifyPayloadAvailable, + 1, + nil, + seeders, + false, + }, - chunks, - make([]blobOracle, bex.config.N()), + cert.ChunkDigests, + chunkHaves, cert.PayloadLength, cert.ExpirySeqNr, cert.Submitter, } - bex.missingChunkScheduler.ScheduleDelay(EventMissingBlobChunk[RI]{ - blobDigest, - }, 0) - } + bex.blobs[blobDigest] = newBlob - chPayload := make(chan []byte) - bex.chBlobFetchResponse <- blobFetchResponse{chPayload, nil} + if newBlob.haveAllChunks() { + close(chNotifyPayloadAvailable) + } else { + bex.chunkRequesterGadget.PleaseRecheckPendingItems() + } + } bex.subs.Go(func() { - chDone := bex.ctx.Done() + select { + case <-req.chDone: + case <-chDone: + return + } + + select { + case bex.chLocalEvent <- EventBlobFetchRequestDone[RI]{blobDigest}: + case <-chDone: + } + }) + bex.subs.Go(func() { select { case <-chNotifyPayloadAvailable: - select { - case bex.chLocalEvent <- EventRespondWithBlobPayload[RI]{blobDigest, chPayload}: - case <-chDone: - } + case <-req.chDone: + return + case <-chDone: + return + } + + select { + case bex.chLocalEvent <- EventBlobFetchRequestRespond[RI]{blobDigest, req}: + case <-req.chDone: case <-chDone: } }) } -func (bex *blobExchangeState[RI]) eventRespondWithBlobPayload(ev EventRespondWithBlobPayload[RI]) { - payload, err := bex.readBlobPayload(ev.BlobDigest) - if err != nil { - close(ev.Channel) - bex.logger.Warn("dropping EventRespondWithBlobPayload, failed to read payload", commontypes.LogFields{ - "blobDigest": ev.BlobDigest, - }) +func (bex *blobExchangeState[RI]) eventBlobFetchRequestRespond(ev EventBlobFetchRequestRespond[RI]) { + var ( + payload []byte + err error + ) + 
blob, ok := bex.blobs[ev.BlobDigest] + if ok && blob != nil && blob.fetch != nil && blob.fetch.expired { + err = fmt.Errorf("blob expired during fetching") + } else { + payload, err = bex.readBlobPayload(ev.BlobDigest) + if payload == nil && err == nil { + err = fmt.Errorf("blob payload is unexpectedly nil") + } + } + ev.Request.respond(bex.ctx, blobFetchResponse{payload, err}) +} + +func (bex *blobExchangeState[RI]) eventBlobFetchRequestDone(ev EventBlobFetchRequestDone[RI]) { + blob, ok := bex.blobs[ev.BlobDigest] + if !ok { return } + fetch := blob.fetch + if fetch != nil { + fetch.weServiced() + } + if blob.prunable() { + delete(bex.blobs, ev.BlobDigest) + } +} - select { - case ev.Channel <- payload: - case <-bex.ctx.Done(): +func (bex *blobExchangeState[RI]) loadChunkHaves(blobDigest BlobDigest, payloadLength uint64) ([]bool, error) { + tx, err := bex.kv.NewReadTransactionUnchecked() + if err != nil { + return nil, fmt.Errorf("failed to create read transaction") + } + defer tx.Discard() + blobMeta, err := tx.ReadBlobMeta(blobDigest) + if err != nil { + return nil, fmt.Errorf("failed to read blob meta: %w", err) } + if blobMeta == nil { + return make([]bool, numChunks(payloadLength)), nil + } + if blobMeta.PayloadLength != payloadLength { + return nil, fmt.Errorf("payload length mismatch: disk %d != mem %d", blobMeta.PayloadLength, payloadLength) + } + return blobMeta.ChunksHave, nil +} + +func (bex *blobExchangeState[RI]) minCertSigners() int { + return bex.config.F + 1 +} + +func (bex *blobExchangeState[RI]) maxCertSigners() int { + + return byzquorum.Size(bex.config.N(), bex.config.F) +} + +func (bex *blobExchangeState[RI]) verifyCert(cert *LightCertifiedBlob) error { + return cert.Verify(bex.config.ConfigDigest, bex.config.OracleIdentities, bex.minCertSigners(), bex.maxCertSigners()) +} + +func staleBlob(expirySeqNr uint64, blobDigest BlobDigest) StaleBlob { + return StaleBlob{expirySeqNr + 1, blobDigest} +} + +func hasBlobExpired(expirySeqNr uint64, 
committedSeqNr uint64) bool { + return expirySeqNr < committedSeqNr } diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/blob_reap.go b/offchainreporting2plus/internal/ocr3_1/protocol/blob_reap.go new file mode 100644 index 00000000..ff0631f8 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/blob_reap.go @@ -0,0 +1,121 @@ +package protocol + +import ( + "context" + "fmt" + "time" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" +) + +const ( + blobReapInterval = 10 * time.Second + maxBlobsToReapInSingleTransaction = 100 +) + +func reapBlobs(ctx context.Context, kvDb KeyValueDatabase) (done bool, err error) { + chDone := ctx.Done() + + tx, err := kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + return false, fmt.Errorf("failed to create read/write transaction: %w", err) + } + defer tx.Discard() + + committedSeqNr, err := tx.ReadHighestCommittedSeqNr() + if err != nil { + return false, fmt.Errorf("failed to read highest committed seq nr: %w", err) + } + + staleBlobs, err := tx.ReadStaleBlobIndex(committedSeqNr, maxBlobsToReapInSingleTransaction+1) + if err != nil { + return false, fmt.Errorf("failed to read stale blob index: %w", err) + } + + if len(staleBlobs) == 0 { + + return true, nil + } + + for i, staleBlob := range staleBlobs { + if i >= maxBlobsToReapInSingleTransaction { + break + } + + select { + case <-chDone: + return true, ctx.Err() + default: + } + + if err := reapSingleBlob(tx, staleBlob); err != nil { + return false, fmt.Errorf("failed to reap single blob: %w", err) + } + } + + if err := tx.Commit(); err != nil { + return false, fmt.Errorf("failed to commit transaction: %w", err) + } + + return len(staleBlobs) <= maxBlobsToReapInSingleTransaction, nil +} + +func reapSingleBlob(tx KeyValueDatabaseReadWriteTransaction, staleBlob StaleBlob) error { + meta, err := tx.ReadBlobMeta(staleBlob.BlobDigest) + if err != nil { + return 
fmt.Errorf("failed to read blob meta: %w", err) + } + + if meta == nil { + return fmt.Errorf("blob meta is nil") + } + + for chunkIndex, chunkHave := range meta.ChunksHave { + if !chunkHave { + continue + } + + if err := tx.DeleteBlobChunk(staleBlob.BlobDigest, uint64(chunkIndex)); err != nil { + return fmt.Errorf("failed to delete blob chunk: %w", err) + } + } + + if err := tx.DeleteBlobMeta(staleBlob.BlobDigest); err != nil { + return fmt.Errorf("failed to delete blob meta: %w", err) + } + if err := tx.DeleteStaleBlobIndex(staleBlob); err != nil { + return fmt.Errorf("failed to delete stale blob index: %w", err) + } + + return nil +} + +func RunBlobReap( + ctx context.Context, + logger loghelper.LoggerWithContext, + kvDb KeyValueDatabase, +) { + chDone := ctx.Done() + chTick := time.After(0) + + for { + select { + case <-chTick: + case <-chDone: + return + } + + done, err := reapBlobs(ctx, kvDb) + if err != nil { + logger.Warn("BlobReap: failed to reap blobs", commontypes.LogFields{ + "error": err, + }) + } + if done { + chTick = time.After(blobReapInterval) + } else { + chTick = time.After(0) + } + } +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/db.go b/offchainreporting2plus/internal/ocr3_1/protocol/db.go index 76812cfb..114d007b 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/db.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/db.go @@ -11,10 +11,6 @@ type PacemakerState struct { HighestSentNewEpochWish uint64 } -type StatePersistenceState struct { - HighestPersistedStateTransitionBlockSeqNr uint64 -} - type Database interface { types.ConfigDatabase @@ -23,10 +19,4 @@ type Database interface { ReadCert(ctx context.Context, configDigest types.ConfigDigest) (CertifiedPrepareOrCommit, error) WriteCert(ctx context.Context, configDigest types.ConfigDigest, cert CertifiedPrepareOrCommit) error - - ReadStatePersistenceState(ctx context.Context, configDigest types.ConfigDigest) (StatePersistenceState, error) - 
WriteStatePersistenceState(ctx context.Context, configDigest types.ConfigDigest, state StatePersistenceState) error - - ReadAttestedStateTransitionBlock(ctx context.Context, configDigest types.ConfigDigest, seqNr uint64) (AttestedStateTransitionBlock, error) - WriteAttestedStateTransitionBlock(ctx context.Context, configDigest types.ConfigDigest, seqNr uint64, ast AttestedStateTransitionBlock) error } diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/event.go b/offchainreporting2plus/internal/ocr3_1/protocol/event.go index d55e3d7c..9ea05f03 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/event.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/event.go @@ -76,10 +76,10 @@ func (ev EventComputedObservationQuorumSuccess[RI]) processOutcomeGeneration(out } type EventComputedObservation[RI any] struct { - Epoch uint64 - SeqNr uint64 - Query types.Query - Observation types.Observation + Epoch uint64 + SeqNr uint64 + AttributedQuery types.AttributedQuery + Observation types.Observation } var _ EventToOutcomeGeneration[struct{}] = EventComputedObservation[struct{}]{} @@ -89,10 +89,10 @@ func (ev EventComputedObservation[RI]) processOutcomeGeneration(outgen *outcomeG } type EventComputedProposalStateTransition[RI any] struct { - Epoch uint64 - SeqNr uint64 - KeyValueStoreReadWriteTransaction KeyValueStoreReadWriteTransaction - stateTransitionInfo stateTransitionInfo + Epoch uint64 + SeqNr uint64 + KeyValueDatabaseReadWriteTransaction KeyValueDatabaseReadWriteTransaction + stateTransitionInfo stateTransitionInfo } var _ EventToOutcomeGeneration[struct{}] = EventComputedProposalStateTransition[struct{}]{} @@ -101,12 +101,23 @@ func (ev EventComputedProposalStateTransition[RI]) processOutcomeGeneration(outg outgen.eventComputedProposalStateTransition(ev) } +type EventComputedCommitted[RI any] struct { + Epoch uint64 + SeqNr uint64 +} + +var _ EventToOutcomeGeneration[struct{}] = EventComputedCommitted[struct{}]{} + +func (ev 
EventComputedCommitted[RI]) processOutcomeGeneration(outgen *outcomeGenerationState[RI]) { + outgen.eventComputedCommitted(ev) +} + type EventToReportAttestation[RI any] interface { processReportAttestation(repatt *reportAttestationState[RI]) } -type EventToStatePersistence[RI any] interface { - processStatePersistence(state *statePersistenceState[RI]) +type EventToStateSync[RI any] interface { + processStateSync(stasy *stateSyncState[RI]) } type EventToBlobExchange[RI any] interface { @@ -165,77 +176,60 @@ type EventStateSyncRequest[RI any] struct { SeqNr uint64 } -var _ EventToStatePersistence[struct{}] = EventStateSyncRequest[struct{}]{} // implements EventToStatePersistence +var _ EventToStateSync[struct{}] = EventStateSyncRequest[struct{}]{} // implements EventToStateSync -func (ev EventStateSyncRequest[RI]) processStatePersistence(state *statePersistenceState[RI]) { - state.eventStateSyncRequest(ev) +func (ev EventStateSyncRequest[RI]) processStateSync(stasy *stateSyncState[RI]) { + stasy.eventStateSyncRequest(ev) } -type EventBlockSyncSummaryHeartbeat[RI any] struct{} - -var _ EventToStatePersistence[struct{}] = EventBlockSyncSummaryHeartbeat[struct{}]{} // implements EventToStatePersistence - -func (ev EventBlockSyncSummaryHeartbeat[RI]) processStatePersistence(state *statePersistenceState[RI]) { - state.eventEventBlockSyncSummaryHeartbeat(ev) -} - -type EventExpiredBlockSyncRequest[RI any] struct { - RequestedFrom commontypes.OracleID - Nonce uint64 -} - -var _ EventToStatePersistence[struct{}] = EventExpiredBlockSyncRequest[struct{}]{} // implements EventToStatePersistence - -func (ev EventExpiredBlockSyncRequest[RI]) processStatePersistence(state *statePersistenceState[RI]) { - state.eventExpiredBlockSyncRequest(ev) +type EventBlobBroadcastRequestRespond[RI any] struct { + BlobDigest BlobDigest + Request blobBroadcastRequest } -type EventReadyToSendNextBlockSyncRequest[RI any] struct{} - -var _ EventToStatePersistence[struct{}] = 
EventReadyToSendNextBlockSyncRequest[struct{}]{} // implements EventToStatePersistence +var _ EventToBlobExchange[struct{}] = EventBlobBroadcastRequestRespond[struct{}]{} // implements EventToBlobExchange -func (ev EventReadyToSendNextBlockSyncRequest[RI]) processStatePersistence(state *statePersistenceState[RI]) { - state.eventReadyToSendNextBlockSyncRequest(ev) +func (ev EventBlobBroadcastRequestRespond[RI]) processBlobExchange(bex *blobExchangeState[RI]) { + bex.eventBlobBroadcastRequestRespond(ev) } -type EventMissingBlobChunk[RI any] struct { +type EventBlobBroadcastRequestDone[RI any] struct { BlobDigest BlobDigest } -var _ EventToBlobExchange[struct{}] = EventMissingBlobChunk[struct{}]{} // implements EventToBlobExchange +var _ EventToBlobExchange[struct{}] = EventBlobBroadcastRequestDone[struct{}]{} // implements EventToBlobExchange -func (ev EventMissingBlobChunk[RI]) processBlobExchange(bex *blobExchangeState[RI]) { - bex.eventMissingChunk(ev) +func (ev EventBlobBroadcastRequestDone[RI]) processBlobExchange(bex *blobExchangeState[RI]) { + bex.eventBlobBroadcastRequestDone(ev) } -type EventMissingBlobCert[RI any] struct { +type EventBlobFetchRequestRespond[RI any] struct { BlobDigest BlobDigest + Request blobFetchRequest } -var _ EventToBlobExchange[struct{}] = EventMissingBlobCert[struct{}]{} // implements EventToBlobExchange +var _ EventToBlobExchange[struct{}] = EventBlobFetchRequestRespond[struct{}]{} // implements EventToBlobExchange -func (ev EventMissingBlobCert[RI]) processBlobExchange(bex *blobExchangeState[RI]) { - bex.eventMissingCert(ev) +func (ev EventBlobFetchRequestRespond[RI]) processBlobExchange(bex *blobExchangeState[RI]) { + bex.eventBlobFetchRequestRespond(ev) } -type EventRespondWithBlobCert[RI any] struct { +type EventBlobFetchRequestDone[RI any] struct { BlobDigest BlobDigest - Channel chan<- LightCertifiedBlob } -var _ EventToBlobExchange[struct{}] = EventRespondWithBlobCert[struct{}]{} // implements EventToBlobExchange +var _ 
EventToBlobExchange[struct{}] = EventBlobFetchRequestDone[struct{}]{} // implements EventToBlobExchange -func (ev EventRespondWithBlobCert[RI]) processBlobExchange(bex *blobExchangeState[RI]) { - bex.eventRespondWithBlobCert(ev) +func (ev EventBlobFetchRequestDone[RI]) processBlobExchange(bex *blobExchangeState[RI]) { + bex.eventBlobFetchRequestDone(ev) } -type EventRespondWithBlobPayload[RI any] struct { +type EventBlobBroadcastGraceTimeout[RI any] struct { BlobDigest BlobDigest - Channel chan<- []byte } -var _ EventToBlobExchange[struct{}] = EventRespondWithBlobPayload[struct{}]{} // implements EventToBlobExchange +var _ EventToBlobExchange[struct{}] = EventBlobBroadcastGraceTimeout[struct{}]{} // implements EventToBlobExchange -func (ev EventRespondWithBlobPayload[RI]) processBlobExchange(blobex *blobExchangeState[RI]) { - blobex.eventRespondWithBlobPayload(ev) +func (ev EventBlobBroadcastGraceTimeout[RI]) processBlobExchange(bex *blobExchangeState[RI]) { + bex.eventBlobBroadcastGraceTimeout(ev) } diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/kv.go b/offchainreporting2plus/internal/ocr3_1/protocol/kv.go deleted file mode 100644 index c293fc07..00000000 --- a/offchainreporting2plus/internal/ocr3_1/protocol/kv.go +++ /dev/null @@ -1,89 +0,0 @@ -package protocol - -import ( - "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" - "github.com/smartcontractkit/libocr/offchainreporting2plus/types" -) - -type KeyValueStoreReadTransaction interface { - // The only read part of the interface that the plugin might see. The rest - // of the methods might only be called by protocol code. - ocr3_1types.KeyValueReader - KeyValueStoreSemanticRead - Discard() -} - -type KeyValueStoreSemanticRead interface { - // Returns the sequence number of which the state the transaction - // represents. Really read from the database here, no cached values allowed. 
- ReadHighestCommittedSeqNr() (uint64, error) - - ReadBlob(BlobDigest) ([]byte, error) - ReadBlobMeta(BlobDigest) (uint64, error) - ReadBlobChunk(BlobDigest, uint64) ([]byte, error) -} - -type KeyValueStoreReadWriteTransaction interface { - KeyValueStoreReadTransaction - // The only write part of the interface that the plugin might see. The rest - // of the methods might only be called by protocol code. - ocr3_1types.KeyValueReadWriter - KeyValueStoreSemanticWrite - // Commit writes the new highest committed sequence number to the magic key - // (if the transaction is _not_ unchecked) and commits the transaction to - // the key value store, then discards the transaction. - Commit() error -} - -type KeyValueStoreSemanticWrite interface { - // GetWriteSet returns a map from keys in string encoding to values that - // have been written in this transaction. If the value of a key has been - // deleted, it is mapped to nil. - - GetWriteSet() ([]KeyValuePair, error) - - // WriteHighestCommittedSeqNr writes the given sequence number to the magic - // key. It is called before Commit on checked transactions. - WriteHighestCommittedSeqNr(seqNr uint64) error - - WriteBlobMeta(BlobDigest, uint64) error - WriteBlobChunk(BlobDigest, uint64, []byte) error -} - -type KeyValuePair struct { - Key []byte - Value []byte - Deleted bool -} - -type KeyValueStore interface { - // Must error if the key value store is not ready to apply state transition - // for the given sequence number. Must update the highest committed sequence - // number magic key upon commit. Convenience method for synchronization - // between outcome generation & state persistence. - NewReadWriteTransaction(postSeqNr uint64) (KeyValueStoreReadWriteTransaction, error) - // Must error if the key value store is not ready to apply state transition - // for the given sequence number. Convenience method for synchronization - // between outcome generation & state persistence. 
- NewReadTransaction(postSeqNr uint64) (KeyValueStoreReadTransaction, error) - - // Unchecked transactions are useful when you don't care that the - // transaction state represents the kv state as of some particular sequence - // number, mostly when writing auxiliary data to the kv store. Unchecked - // transactions do not update the highest committed sequence number magic - // key upon commit, as would checked transactions. - NewReadWriteTransactionUnchecked() (KeyValueStoreReadWriteTransaction, error) - // Unchecked transactions are useful when you don't care that the - // transaction state represents the kv state as of some particular sequence - // number, mostly when reading auxiliary data from the kv store. - NewReadTransactionUnchecked() (KeyValueStoreReadTransaction, error) - - // Deprecated: Kept for convenience/small diff, consider using - // [KeyValueStoreSemanticRead.ReadHighestCommittedSeqNr] instead. - HighestCommittedSeqNr() (uint64, error) - Close() error -} - -type KeyValueStoreFactory interface { - NewKeyValueStore(configDigest types.ConfigDigest) (KeyValueStore, error) -} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/kvdb.go b/offchainreporting2plus/internal/ocr3_1/protocol/kvdb.go new file mode 100644 index 00000000..6437cb1c --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/kvdb.go @@ -0,0 +1,181 @@ +package protocol + +import ( + "github.com/smartcontractkit/libocr/internal/jmt" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +type KeyValueDatabaseReadTransaction interface { + // The only read part of the interface that the plugin might see. The rest + // of the methods might only be called by protocol code. 
+ ocr3_1types.KeyValueStateReader + KeyValueDatabaseSemanticRead + Discard() +} + +type KeyValueDatabaseSemanticRead interface { + // ReadHighestCommittedSeqNr returns the sequence number of the state that the transaction + // represents. Really read from the database here, no cached values allowed. + ReadHighestCommittedSeqNr() (uint64, error) + ReadLowestPersistedSeqNr() (uint64, error) + + ReadAttestedStateTransitionBlock(seqNr uint64) (AttestedStateTransitionBlock, error) + ReadAttestedStateTransitionBlocks(minSeqNr uint64, maxItems int) (blocks []AttestedStateTransitionBlock, more bool, err error) + + ReadTreeSyncStatus() (TreeSyncStatus, error) + // ReadTreeSyncChunk retrieves a chunk of undigested key-value pairs in the + // range [startIndex, requestEndInclIndex] of the key digest space. It + // returns a maximally sized chunk that fully covers the range [startIndex, + // endInclIndex], where endInclIndex <= requestEndInclIndex, such that the + // chunk respects the protocol.MaxTreeSyncChunkKeys and + // protocol.MaxTreeSyncChunkKeysPlusValuesLength limits. It also includes in + // boundingLeaves the subrange proof, proving inclusion of key-values in the + // range [startIndex, endInclIndex] without omissions. + ReadTreeSyncChunk( + toSeqNr uint64, + startIndex jmt.Digest, + requestEndInclIndex jmt.Digest, + ) ( + endInclIndex jmt.Digest, + boundingLeaves []jmt.BoundingLeaf, + keyValues []KeyValuePair, + err error, + ) + // ReadBlobPayload returns the payload of the blob if it exists in full and + // the blob has not expired. If the blob existed at some point but has since + // expired, it returns an error. If the blob never existed, it returns nil. + // If only some chunks are present, it returns an error. 
+ ReadBlobPayload(BlobDigest) ([]byte, error) + ReadBlobMeta(BlobDigest) (*BlobMeta, error) + ReadBlobChunk(BlobDigest, uint64) ([]byte, error) + ReadStaleBlobIndex(maxStaleSinceSeqNr uint64, limit int) ([]StaleBlob, error) + + jmt.RootReader + jmt.NodeReader +} + +type KeyValueDatabaseReadWriteTransaction interface { + KeyValueDatabaseReadTransaction + // The only write part of the interface that the plugin might see. The rest + // of the methods might only be called by protocol code. + ocr3_1types.KeyValueStateReadWriter + KeyValueDatabaseSemanticWrite + // Commit writes the new highest committed sequence number to the magic key + // (if the transaction is _not_ unchecked) and commits the transaction to + // the key value store, then discards the transaction. + Commit() error +} + +type VerifyAndWriteTreeSyncChunkResult int + +const ( + _ VerifyAndWriteTreeSyncChunkResult = iota + VerifyAndWriteTreeSyncChunkResultOkNeedMore + VerifyAndWriteTreeSyncChunkResultOkComplete + VerifyAndWriteTreeSyncChunkResultByzantine + VerifyAndWriteTreeSyncChunkResultUnrelatedError +) + +type KeyValueDatabaseSemanticWrite interface { + // GetWriteSet returns a slice of the KeyValuePair entries that + // have been written in this transaction. If the value of a key has been + // deleted, the value is mapped to nil. + GetWriteSet() ([]KeyValuePairWithDeletions, error) + + // CloseWriteSet returns the state root, writes it to the KV store + // and closes the transaction for writing: any future attempts for Writes or Deletes + // on this transaction will fail. + CloseWriteSet() (StateRootDigest, error) + + // ApplyWriteSet applies the write set to the transaction and returns the + // state root digest. Useful for reproposals and state synchronization. Only + // works on checked transactions where the postSeqNr is specified at + // creation. 
+ ApplyWriteSet(writeSet []KeyValuePairWithDeletions) (StateRootDigest, error) + + WriteAttestedStateTransitionBlock(seqNr uint64, block AttestedStateTransitionBlock) error + DeleteAttestedStateTransitionBlocks(maxSeqNrToDelete uint64, maxItems int) (done bool, err error) + + // WriteHighestCommittedSeqNr writes the given sequence number to the magic + // key. It is called before Commit on checked transactions. + WriteHighestCommittedSeqNr(seqNr uint64) error + WriteLowestPersistedSeqNr(seqNr uint64) error + // VerifyAndWriteTreeSyncChunk first verifies that the keyValues are fully + // and without omissions included in the key digest range of [startIndex, + // endInclIndex]. Only after doing so, it writes all keyValues into the tree + // and flat representation. + VerifyAndWriteTreeSyncChunk( + targetRootDigest StateRootDigest, + targetSeqNr uint64, + startIndex jmt.Digest, + endInclIndex jmt.Digest, + boundingLeaves []jmt.BoundingLeaf, + keyValues []KeyValuePair, + ) (VerifyAndWriteTreeSyncChunkResult, error) + + WriteTreeSyncStatus(state TreeSyncStatus) error + WriteBlobMeta(BlobDigest, BlobMeta) error + DeleteBlobMeta(BlobDigest) error + WriteBlobChunk(BlobDigest, uint64, []byte) error + DeleteBlobChunk(BlobDigest, uint64) error + WriteStaleBlobIndex(StaleBlob) error + DeleteStaleBlobIndex(StaleBlob) error + + jmt.RootWriter + DeleteRoots(minVersionToKeep jmt.Version, maxItems int) (done bool, err error) + + jmt.NodeWriter + jmt.StaleNodeWriter + DeleteStaleNodes(maxStaleSinceVersion jmt.Version, maxItems int) (done bool, err error) + + DestructiveDestroyForTreeSync(n int) (done bool, err error) +} + +type BlobMeta struct { + PayloadLength uint64 + ChunksHave []bool + ExpirySeqNr uint64 +} + +type StaleBlob struct { + StaleSinceSeqNr uint64 + BlobDigest BlobDigest +} + +type KeyValueDatabase interface { + // Must error if the key value store is not ready to apply state transition + // for the given sequence number. 
Must update the highest committed sequence + // number magic key upon commit. Convenience method for synchronization + // between outcome generation & state sync. + NewSerializedReadWriteTransaction(postSeqNr uint64) (KeyValueDatabaseReadWriteTransaction, error) + // Must error if the key value store is not ready to apply state transition + // for the given sequence number. Convenience method for synchronization + // between outcome generation & state sync. + NewReadTransaction(postSeqNr uint64) (KeyValueDatabaseReadTransaction, error) + + // Unchecked transactions are useful when you don't care that the + // transaction state represents the kv state as of some particular sequence + // number, mostly when writing auxiliary data to the kv store. Unchecked + // transactions do not update the highest committed sequence number magic + // key upon commit, as would checked transactions. + NewSerializedReadWriteTransactionUnchecked() (KeyValueDatabaseReadWriteTransaction, error) + // Unserialized transactions are guaranteed to commit. + // The protocol should make sure that there are no conflicts across potentially concurrent unserialized transactions, + // and if two unserialized transactions could actually have conflicts the protocol ensures that they are + // never opened concurrently. + NewUnserializedReadWriteTransactionUnchecked() (KeyValueDatabaseReadWriteTransaction, error) + // Unchecked transactions are useful when you don't care that the + // transaction state represents the kv state as of some particular sequence + // number, mostly when reading auxiliary data from the kv store. + NewReadTransactionUnchecked() (KeyValueDatabaseReadTransaction, error) + + // Deprecated: Kept for convenience/small diff, consider using + // [KeyValueDatabaseSemanticRead.ReadHighestCommittedSeqNr] instead. 
+ HighestCommittedSeqNr() (uint64, error) + Close() error +} + +type KeyValueDatabaseFactory interface { + NewKeyValueDatabase(configDigest types.ConfigDigest) (KeyValueDatabase, error) +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/message.go b/offchainreporting2plus/internal/ocr3_1/protocol/message.go index d93f8414..ff8f41b9 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/message.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/message.go @@ -1,14 +1,18 @@ -package protocol // +package protocol import ( "crypto/ed25519" + "time" "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/byzquorum" + "github.com/smartcontractkit/libocr/internal/jmt" "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) +//go-sumtype:decl Message + // Message is the interface used to pass an inter-oracle message to the local // oracle process. type Message[RI any] interface { @@ -62,14 +66,14 @@ type MessageToReportAttestationWithSender[RI any] struct { sender commontypes.OracleID } -type MessageToStatePersistence[RI any] interface { +type MessageToStateSync[RI any] interface { Message[RI] - processStatePersistence(state *statePersistenceState[RI], sender commontypes.OracleID) + processStateSync(stasy *stateSyncState[RI], sender commontypes.OracleID) } -type MessageToStatePersistenceWithSender[RI any] struct { - msg MessageToStatePersistence[RI] +type MessageToStateSyncWithSender[RI any] struct { + msg MessageToStateSync[RI] sender commontypes.OracleID } @@ -383,74 +387,165 @@ func (msg MessageCertifiedCommit[RI]) processReportAttestation(repatt *reportAtt } type MessageBlockSyncRequest[RI any] struct { - RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound - HighestCommittedSeqNr uint64 - Nonce uint64 + RequestHandle types.RequestHandle // actual handle for outbound message, sentinel 
for inbound + StartSeqNr uint64 // a successful response must contain at least the block with this sequence number + EndExclSeqNr uint64 // the response may only contain sequence numbers less than this } -var _ MessageToStatePersistence[struct{}] = MessageBlockSyncRequest[struct{}]{} +var _ MessageToStateSync[struct{}] = MessageBlockSyncRequest[struct{}]{} func (msg MessageBlockSyncRequest[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { return true } func (msg MessageBlockSyncRequest[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { - o.chNetToStatePersistence <- MessageToStatePersistenceWithSender[RI]{msg, sender} + o.chNetToStateSync <- MessageToStateSyncWithSender[RI]{msg, sender} } -func (msg MessageBlockSyncRequest[RI]) processStatePersistence(state *statePersistenceState[RI], sender commontypes.OracleID) { - state.messageBlockSyncReq(msg, sender) +func (msg MessageBlockSyncRequest[RI]) processStateSync(stasy *stateSyncState[RI], sender commontypes.OracleID) { + stasy.messageBlockSyncRequest(msg, sender) } -type MessageBlockSyncSummary[RI any] struct { - LowestPersistedSeqNr uint64 +type MessageStateSyncSummary[RI any] struct { + LowestPersistedSeqNr uint64 + HighestCommittedSeqNr uint64 } -var _ MessageToStatePersistence[struct{}] = MessageBlockSyncSummary[struct{}]{} +var _ MessageToStateSync[struct{}] = MessageStateSyncSummary[struct{}]{} -func (msg MessageBlockSyncSummary[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { +func (msg MessageStateSyncSummary[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { return true } -func (msg MessageBlockSyncSummary[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { - o.chNetToStatePersistence <- MessageToStatePersistenceWithSender[RI]{msg, sender} +func (msg MessageStateSyncSummary[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { + o.chNetToStateSync <- 
MessageToStateSyncWithSender[RI]{msg, sender} } -func (msg MessageBlockSyncSummary[RI]) processStatePersistence(state *statePersistenceState[RI], sender commontypes.OracleID) { - state.messageBlockSyncSummary(msg, sender) +func (msg MessageStateSyncSummary[RI]) processStateSync(stasy *stateSyncState[RI], sender commontypes.OracleID) { + stasy.messageStateSyncSummary(msg, sender) } -type MessageBlockSync[RI any] struct { +type MessageBlockSyncResponse[RI any] struct { RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound - AttestedStateTransitionBlocks []AttestedStateTransitionBlock - Nonce uint64 + RequestStartSeqNr uint64 + RequestEndExclSeqNr uint64 + AttestedStateTransitionBlocks []AttestedStateTransitionBlock // must be contiguous and (if non-empty) starting at RequestStartSeqNr +} + +var _ MessageToStateSync[struct{}] = MessageBlockSyncResponse[struct{}]{} + +func (msg MessageBlockSyncResponse[RI]) CheckSize(n int, f int, limits ocr3_1types.ReportingPluginLimits, maxReportSigLen int) bool { + if len(msg.AttestedStateTransitionBlocks) > MaxBlocksPerBlockSyncResponse { + return false + } + for _, astb := range msg.AttestedStateTransitionBlocks { + if !astb.CheckSize(n, f, limits) { + return false + } + } + return true +} + +func (msg MessageBlockSyncResponse[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { + o.chNetToStateSync <- MessageToStateSyncWithSender[RI]{msg, sender} +} + +func (msg MessageBlockSyncResponse[RI]) processStateSync(stasy *stateSyncState[RI], sender commontypes.OracleID) { + stasy.messageBlockSyncResponse(msg, sender) +} + +type MessageTreeSyncChunkRequest[RI any] struct { + RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound + ToSeqNr uint64 + StartIndex jmt.Digest + EndInclIndex jmt.Digest +} + +var _ MessageToStateSync[struct{}] = MessageTreeSyncChunkRequest[struct{}]{} + +func (msg MessageTreeSyncChunkRequest[RI]) CheckSize(n int, f int, 
limits ocr3_1types.ReportingPluginLimits, maxReportSigLen int) bool { + return true +} + +func (msg MessageTreeSyncChunkRequest[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { + o.chNetToStateSync <- MessageToStateSyncWithSender[RI]{msg, sender} } -var _ MessageToStatePersistence[struct{}] = MessageBlockSync[struct{}]{} +func (msg MessageTreeSyncChunkRequest[RI]) processStateSync(stasy *stateSyncState[RI], sender commontypes.OracleID) { + stasy.messageTreeSyncChunkRequest(msg, sender) +} -func (msg MessageBlockSync[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { +type MessageTreeSyncChunkResponse[RI any] struct { + RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound + ToSeqNr uint64 + StartIndex jmt.Digest + RequestEndInclIndex jmt.Digest + GoAway bool + EndInclIndex jmt.Digest + KeyValues []KeyValuePair + BoundingLeaves []jmt.BoundingLeaf +} + +var _ MessageToStateSync[struct{}] = MessageTreeSyncChunkResponse[struct{}]{} + +func (msg MessageTreeSyncChunkResponse[RI]) CheckSize(n int, f int, limits ocr3_1types.ReportingPluginLimits, maxReportSigLen int) bool { + if len(msg.BoundingLeaves) > jmt.MaxBoundingLeaves { + return false + } + for _, bl := range msg.BoundingLeaves { + if len(bl.Siblings) > jmt.MaxProofLength { + return false + } + } + if len(msg.KeyValues) > MaxTreeSyncChunkKeys { + return false + } + treeSyncChunkLeavesSize := 0 + for _, kv := range msg.KeyValues { + if len(kv.Key) > ocr3_1types.MaxMaxKeyValueKeyLength { + return false + } + if len(kv.Value) > ocr3_1types.MaxMaxKeyValueValueLength { + return false + } + treeSyncChunkLeavesSize += len(kv.Key) + len(kv.Value) + } + if treeSyncChunkLeavesSize > MaxTreeSyncChunkKeysPlusValuesLength { + return false + } return true } -func (msg MessageBlockSync[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { - o.chNetToStatePersistence <- MessageToStatePersistenceWithSender[RI]{msg, sender} +func (msg 
MessageTreeSyncChunkResponse[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { + o.chNetToStateSync <- MessageToStateSyncWithSender[RI]{msg, sender} } -func (msg MessageBlockSync[RI]) processStatePersistence(state *statePersistenceState[RI], sender commontypes.OracleID) { - state.messageBlockSync(msg, sender) +func (msg MessageTreeSyncChunkResponse[RI]) processStateSync(stasy *stateSyncState[RI], sender commontypes.OracleID) { + stasy.messageTreeSyncChunkResponse(msg, sender) +} + +type MessageBlobOfferRequestInfo struct { + ExpiryTimestamp time.Time } type MessageBlobOffer[RI any] struct { + RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound + RequestInfo *MessageBlobOfferRequestInfo ChunkDigests []BlobChunkDigest PayloadLength uint64 ExpirySeqNr uint64 - Submitter commontypes.OracleID } var _ MessageToBlobExchange[struct{}] = MessageBlobOffer[struct{}]{} -func (msg MessageBlobOffer[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { - return true // TODO: add proper size checks +func (msg MessageBlobOffer[RI]) CheckSize(n int, f int, limits ocr3_1types.ReportingPluginLimits, _ int) bool { + if msg.PayloadLength > uint64(limits.MaxBlobPayloadLength) { + return false + } + if uint64(len(msg.ChunkDigests)) != numChunks(msg.PayloadLength) { + return false + } + return true } func (msg MessageBlobOffer[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { @@ -461,16 +556,48 @@ func (msg MessageBlobOffer[RI]) processBlobExchange(bex *blobExchangeState[RI], bex.messageBlobOffer(msg, sender) } -type MessageBlobChunkRequest[RI any] struct { +type MessageBlobOfferResponse[RI any] struct { RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound BlobDigest BlobDigest - ChunkIndex uint64 + RejectOffer bool + Signature BlobAvailabilitySignature +} + +var _ MessageToBlobExchange[struct{}] = MessageBlobOfferResponse[struct{}]{} + +func (msg 
MessageBlobOfferResponse[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { + if msg.RejectOffer { + return len(msg.Signature) == 0 + } else { + return len(msg.Signature) == ed25519.SignatureSize + } +} + +func (msg MessageBlobOfferResponse[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { + o.chNetToBlobExchange <- MessageToBlobExchangeWithSender[RI]{msg, sender} +} + +func (msg MessageBlobOfferResponse[RI]) processBlobExchange(bex *blobExchangeState[RI], sender commontypes.OracleID) { + bex.messageBlobOfferResponse(msg, sender) +} + +type MessageBlobChunkRequestInfo struct { + ExpiryTimestamp time.Time +} + +type MessageBlobChunkRequest[RI any] struct { + RequestHandle types.RequestHandle // actual handle for outbound message, sentinel for inbound + + RequestInfo *MessageBlobChunkRequestInfo + + BlobDigest BlobDigest + ChunkIndex uint64 } var _ MessageToBlobExchange[struct{}] = MessageBlobChunkRequest[struct{}]{} func (msg MessageBlobChunkRequest[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { - return true // TODO: add proper size checks + return true } func (msg MessageBlobChunkRequest[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { @@ -486,13 +613,17 @@ type MessageBlobChunkResponse[RI any] struct { BlobDigest BlobDigest ChunkIndex uint64 + GoAway bool Chunk []byte } var _ MessageToBlobExchange[struct{}] = MessageBlobChunkResponse[struct{}]{} func (msg MessageBlobChunkResponse[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { - return true // TODO: add proper size checks + if len(msg.Chunk) > BlobChunkSize { + return false + } + return true } func (msg MessageBlobChunkResponse[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { @@ -502,22 +633,3 @@ func (msg MessageBlobChunkResponse[RI]) process(o *oracleState[RI], sender commo func (msg MessageBlobChunkResponse[RI]) processBlobExchange(bex *blobExchangeState[RI], sender 
commontypes.OracleID) { bex.messageBlobChunkResponse(msg, sender) } - -type MessageBlobAvailable[RI any] struct { - BlobDigest BlobDigest - Signature BlobAvailabilitySignature -} - -var _ MessageToBlobExchange[struct{}] = MessageBlobAvailable[struct{}]{} - -func (msg MessageBlobAvailable[RI]) CheckSize(n int, f int, _ ocr3_1types.ReportingPluginLimits, _ int) bool { - return true // TODO: add proper size checks -} - -func (msg MessageBlobAvailable[RI]) process(o *oracleState[RI], sender commontypes.OracleID) { - o.chNetToBlobExchange <- MessageToBlobExchangeWithSender[RI]{msg, sender} -} - -func (msg MessageBlobAvailable[RI]) processBlobExchange(bex *blobExchangeState[RI], sender commontypes.OracleID) { - bex.messageBlobAvailable(msg, sender) -} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/oracle.go b/offchainreporting2plus/internal/ocr3_1/protocol/oracle.go index 03e3d625..2d4123d1 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/oracle.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/oracle.go @@ -29,7 +29,8 @@ func RunOracle[RI any]( contractTransmitter ocr3types.ContractTransmitter[RI], database Database, id commontypes.OracleID, - kvStore KeyValueStore, + kvDb KeyValueDatabase, + limits ocr3_1types.ReportingPluginLimits, localConfig types.LocalConfig, logger loghelper.LoggerWithContext, metricsRegisterer prometheus.Registerer, @@ -47,7 +48,8 @@ func RunOracle[RI any]( contractTransmitter: contractTransmitter, database: database, id: id, - kvStore: kvStore, + kvDb: kvDb, + limits: limits, localConfig: localConfig, logger: logger, metricsRegisterer: metricsRegisterer, @@ -68,7 +70,8 @@ type oracleState[RI any] struct { contractTransmitter ocr3types.ContractTransmitter[RI] database Database id commontypes.OracleID - kvStore KeyValueStore + kvDb KeyValueDatabase + limits ocr3_1types.ReportingPluginLimits localConfig types.LocalConfig logger loghelper.LoggerWithContext metricsRegisterer prometheus.Registerer @@ -81,7 +84,7 @@ 
type oracleState[RI any] struct { chNetToPacemaker chan<- MessageToPacemakerWithSender[RI] chNetToOutcomeGeneration chan<- MessageToOutcomeGenerationWithSender[RI] chNetToReportAttestation chan<- MessageToReportAttestationWithSender[RI] - chNetToStatePersistence chan<- MessageToStatePersistenceWithSender[RI] + chNetToStateSync chan<- MessageToStateSyncWithSender[RI] chNetToBlobExchange chan<- MessageToBlobExchangeWithSender[RI] childCancel context.CancelFunc childCtx context.Context @@ -110,7 +113,7 @@ type oracleState[RI any] struct { // │ request│ │ │ // ▼ │ ▼ ▼ // ┌──────┐ ┌──┴───────────────┐ ┌─────────────────┐ -// │Oracle│◄────────────►│Outcome Generation│◄────────────►│State Persistence│ +// │Oracle│◄────────────►│Outcome Generation│◄────────────►│State Sync │ // └──────┘ out.gen. └──────┬───────────┘ └─────────────────┘ // ▲ message │ ▲ // │ │certified │ @@ -164,11 +167,11 @@ func (o *oracleState[RI]) run() { chReportAttestationToTransmission := make(chan EventToTransmission[RI]) - chNetToStatePersistence := make(chan MessageToStatePersistenceWithSender[RI]) - o.chNetToStatePersistence = chNetToStatePersistence + chNetToStateSync := make(chan MessageToStateSyncWithSender[RI]) + o.chNetToStateSync = chNetToStateSync - chOutcomeGenerationToStatePersistence := make(chan EventToStatePersistence[RI]) - chReportAttestationToStatePersistence := make(chan EventToStatePersistence[RI]) + chOutcomeGenerationToStateSync := make(chan EventToStateSync[RI]) + chReportAttestationToStateSync := make(chan EventToStateSync[RI]) chNetToBlobExchange := make(chan MessageToBlobExchangeWithSender[RI]) o.chNetToBlobExchange = chNetToBlobExchange @@ -177,42 +180,28 @@ func (o *oracleState[RI]) run() { // communication between blob exchange and blob endpoint chBlobBroadcastRequest := make(chan blobBroadcastRequest) - chBlobBroadcastResponse := make(chan blobBroadcastResponse) - chBlobFetchRequest := make(chan blobFetchRequest) - chBlobFetchResponse := make(chan blobFetchResponse) // 
be careful if you want to change anything here. // chNetTo* sends in message.go assume that their recipients are running. o.childCtx, o.childCancel = context.WithCancel(context.Background()) defer o.childCancel() - defer o.kvStore.Close() + defer o.kvDb.Close() - paceState, cert, statePersistenceState, err := o.restoreFromDatabase() + paceState, cert, err := o.restoreFromDatabase() if err != nil { o.logger.Error("restoreFromDatabase returned an error, exiting oracle", commontypes.LogFields{ "error": err, }) return } - highestCommittedToKVdSeqNr, err := o.kvStore.HighestCommittedSeqNr() - if err != nil { - o.logger.Error("cannot read highest committed seqNr from key value store, exiting oracle", - commontypes.LogFields{ - "error": err, - }) - return - } blobEndpoint := BlobEndpoint{ o.childCtx, chBlobBroadcastRequest, - chBlobBroadcastResponse, - chBlobFetchRequest, - chBlobFetchResponse, } o.blobEndpointWrapper.setBlobEndpoint(&blobEndpoint) // pass through to plugin @@ -244,12 +233,12 @@ func (o *oracleState[RI]) run() { chPacemakerToOutcomeGeneration, chOutcomeGenerationToPacemaker, chOutcomeGenerationToReportAttestation, - chOutcomeGenerationToStatePersistence, + chOutcomeGenerationToStateSync, &blobEndpoint, o.config, o.database, o.id, - o.kvStore, + o.kvDb, o.localConfig, o.logger, o.metricsRegisterer, @@ -268,7 +257,7 @@ func (o *oracleState[RI]) run() { chNetToReportAttestation, chOutcomeGenerationToReportAttestation, - chReportAttestationToStatePersistence, + chReportAttestationToStateSync, chReportAttestationToTransmission, o.config, o.contractTransmitter, @@ -280,21 +269,19 @@ func (o *oracleState[RI]) run() { }) o.subprocesses.Go(func() { - RunStatePersistence[RI]( + RunStateSync[RI]( o.childCtx, - chNetToStatePersistence, - chOutcomeGenerationToStatePersistence, - chReportAttestationToStatePersistence, + chNetToStateSync, + chOutcomeGenerationToStateSync, + chReportAttestationToStateSync, o.config, o.database, o.id, - o.kvStore, + o.kvDb, o.logger, 
o.netEndpoint, o.reportingPlugin, - statePersistenceState, - highestCommittedToKVdSeqNr, ) }) @@ -320,14 +307,12 @@ func (o *oracleState[RI]) run() { chOutcomeGenerationToBlobExchange, chBlobBroadcastRequest, - chBlobBroadcastResponse, - chBlobFetchRequest, - chBlobFetchResponse, o.config, - o.kvStore, + o.kvDb, o.id, + o.limits, o.localConfig, o.logger, o.metricsRegisterer, @@ -399,7 +384,7 @@ func tryUntilSuccess[T any](ctx context.Context, logger commontypes.Logger, retr } } -func (o *oracleState[RI]) restoreFromDatabase() (PacemakerState, CertifiedPrepareOrCommit, StatePersistenceState, error) { +func (o *oracleState[RI]) restoreFromDatabase() (PacemakerState, CertifiedPrepareOrCommit, error) { const retryPeriod = 5 * time.Second paceState, err := tryUntilSuccess[PacemakerState]( @@ -413,7 +398,7 @@ func (o *oracleState[RI]) restoreFromDatabase() (PacemakerState, CertifiedPrepar }, ) if err != nil { - return PacemakerState{}, nil, StatePersistenceState{}, err + return PacemakerState{}, nil, err } o.logger.Info("restoreFromDatabase: successfully restored pacemaker state", commontypes.LogFields{ @@ -431,7 +416,7 @@ func (o *oracleState[RI]) restoreFromDatabase() (PacemakerState, CertifiedPrepar }, ) if err != nil { - return PacemakerState{}, nil, StatePersistenceState{}, err + return PacemakerState{}, nil, err } if cert != nil { @@ -443,23 +428,5 @@ func (o *oracleState[RI]) restoreFromDatabase() (PacemakerState, CertifiedPrepar cert = &CertifiedCommit{} } - statePersistenceState, err := tryUntilSuccess[StatePersistenceState]( - o.ctx, - o.logger, - retryPeriod, - o.localConfig.DatabaseTimeout, - "Database.ReadStatePersistenceState", - func(ctx context.Context) (StatePersistenceState, error) { - return o.database.ReadStatePersistenceState(ctx, o.config.ConfigDigest) - }, - ) - if err != nil { - return PacemakerState{}, nil, StatePersistenceState{}, err - } - - o.logger.Info("restoreFromDatabase: successfully restored state persistence state", 
commontypes.LogFields{ - "state": statePersistenceState, - }) - - return paceState, cert, statePersistenceState, nil + return paceState, cert, nil } diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation.go b/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation.go index dcdc9278..f730118e 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation.go @@ -33,12 +33,12 @@ func RunOutcomeGeneration[RI any]( chPacemakerToOutcomeGeneration <-chan EventToOutcomeGeneration[RI], chOutcomeGenerationToPacemaker chan<- EventToPacemaker[RI], chOutcomeGenerationToReportAttestation chan<- EventToReportAttestation[RI], - chOutcomeGenerationToStatePersistence chan<- EventToStatePersistence[RI], + chOutcomeGenerationToStateSync chan<- EventToStateSync[RI], blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher, config ocr3config.SharedConfig, database Database, id commontypes.OracleID, - kvStore KeyValueStore, + kvDb KeyValueDatabase, localConfig types.LocalConfig, logger loghelper.LoggerWithContext, metricsRegisterer prometheus.Registerer, @@ -59,12 +59,12 @@ func RunOutcomeGeneration[RI any]( chPacemakerToOutcomeGeneration: chPacemakerToOutcomeGeneration, chOutcomeGenerationToPacemaker: chOutcomeGenerationToPacemaker, chOutcomeGenerationToReportAttestation: chOutcomeGenerationToReportAttestation, - chOutcomeGenerationToStatePersistence: chOutcomeGenerationToStatePersistence, + chOutcomeGenerationToStateSync: chOutcomeGenerationToStateSync, blobBroadcastFetcher: blobBroadcastFetcher, config: config, database: database, id: id, - kvStore: kvStore, + kvDb: kvDb, localConfig: localConfig, logger: logger.MakeUpdated(commontypes.LogFields{"proto": "outgen"}), metrics: newOutcomeGenerationMetrics(metricsRegisterer, logger), @@ -85,12 +85,12 @@ type outcomeGenerationState[RI any] struct { chPacemakerToOutcomeGeneration <-chan EventToOutcomeGeneration[RI] 
chOutcomeGenerationToPacemaker chan<- EventToPacemaker[RI] chOutcomeGenerationToReportAttestation chan<- EventToReportAttestation[RI] - chOutcomeGenerationToStatePersistence chan<- EventToStatePersistence[RI] + chOutcomeGenerationToStateSync chan<- EventToStateSync[RI] blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher config ocr3config.SharedConfig database Database id commontypes.OracleID - kvStore KeyValueStore + kvDb KeyValueDatabase localConfig types.LocalConfig logger loghelper.LoggerWithContext metrics *outcomeGenerationMetrics @@ -138,7 +138,7 @@ type followerState[RI any] struct { stateTransitionInfo stateTransitionInfo - openKVTxn KeyValueStoreReadWriteTransaction + openKVTxn KeyValueDatabaseReadWriteTransaction // lock @@ -152,6 +152,7 @@ type stateTransitionInfo struct { InputsDigest StateTransitionInputsDigest Outputs StateTransitionOutputs OutputDigest StateTransitionOutputDigest + StateRootDigest StateRootDigest ReportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor ReportsPlusPrecursorDigest ReportsPlusPrecursorDigest } diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_follower.go b/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_follower.go index d87036b5..4ec7a019 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_follower.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_follower.go @@ -3,12 +3,14 @@ package protocol import ( "bytes" "context" + "sync" "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/loghelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/common/pool" "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" + "github.com/smartcontractkit/libocr/subprocesses" ) type outgenFollowerPhase string @@ -22,6 +24,7 @@ const ( outgenFollowerPhaseBackgroundProposalStateTransition 
outgenFollowerPhase = "backgroundProposalStateTransition" outgenFollowerPhaseSentPrepare outgenFollowerPhase = "sentPrepare" outgenFollowerPhaseSentCommit outgenFollowerPhase = "sentCommit" + outgenFollowerPhaseBackgroundCommitted outgenFollowerPhase = "backgroundCommitted" ) func (outgen *outcomeGenerationState[RI]) eventTInitialTimeout() { @@ -84,7 +87,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar outgen.refreshCommittedSeqNrAndCert() if !outgen.ensureHighestCertifiedIsCompatible(msg.EpochStartProof.HighestCertified, "MessageEpochStart") { select { - case outgen.chOutcomeGenerationToStatePersistence <- EventStateSyncRequest[RI]{ + case outgen.chOutcomeGenerationToStateSync <- EventStateSyncRequest[RI]{ msg.EpochStartProof.HighestCertified.SeqNr(), }: case <-outgen.ctx.Done(): @@ -123,6 +126,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar if prepareQcSeqNr == outgen.sharedState.committedSeqNr { stateTransitionInputsDigest := prepareQc.StateTransitionInputsDigest + stateRootDigest := prepareQc.StateRootDigest stateTransitionOutputDigest := MakeStateTransitionOutputDigest( outgen.ID(), @@ -141,6 +145,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar prepareQcSeqNr, stateTransitionInputsDigest, stateTransitionOutputDigest, + stateRootDigest, reportPlusPrecursorDigest, outgen.offchainKeyring.OffchainSign, ) @@ -157,6 +162,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar stateTransitionInputsDigest, prepareQc.StateTransitionOutputs, stateTransitionOutputDigest, + stateRootDigest, prepareQc.ReportsPlusPrecursor, reportPlusPrecursorDigest, } @@ -180,7 +186,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar "seqNr": outgen.sharedState.seqNr, }) - kvReadWriteTxn, err := outgen.kvStore.NewReadWriteTransaction(prepareQcSeqNr) + kvReadWriteTxn, err := 
outgen.kvDb.NewSerializedReadWriteTransaction(prepareQcSeqNr) if err != nil { outgen.logger.Warn("could not create kv read/write transaction", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -198,6 +204,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar inputsDigest := prepareQc.StateTransitionInputsDigest writeSet := prepareQc.StateTransitionOutputs.WriteSet reportsPlusPrecursor := prepareQc.ReportsPlusPrecursor + stateRootDigest := prepareQc.StateRootDigest outgen.subs.Go(func() { outgen.backgroundProposalStateTransition( @@ -208,6 +215,7 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStart(msg MessageEpochStar inputsDigest, writeSet, + stateRootDigest, reportsPlusPrecursor, types.AttributedQuery{}, @@ -324,7 +332,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessRoundStartPool() { *outgen.followerState.query, outgen.sharedState.l, } - kvReadTxn, err := outgen.kvStore.NewReadTransaction(roundCtx.SeqNr) + kvReadTxn, err := outgen.kvDb.NewReadTransaction(roundCtx.SeqNr) if err != nil { outgen.logger.Warn("failed to create new transaction, aborting tryProcessRoundStartPool", commontypes.LogFields{ "seqNr": roundCtx.SeqNr, @@ -343,7 +351,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundObservation( logger loghelper.LoggerWithContext, roundCtx RoundContext, aq types.AttributedQuery, - kvReadTxn KeyValueStoreReadTransaction, + kvReadTxn KeyValueDatabaseReadTransaction, ) { observation, ok := callPluginFromOutcomeGenerationBackground[types.Observation]( ctx, @@ -352,7 +360,15 @@ func (outgen *outcomeGenerationState[RI]) backgroundObservation( outgen.config.MaxDurationObservation, roundCtx, func(ctx context.Context, roundCtx RoundContext) (types.Observation, error) { - return outgen.reportingPlugin.Observation(ctx, roundCtx.SeqNr, aq, kvReadTxn, outgen.blobBroadcastFetcher) + return outgen.reportingPlugin.Observation(ctx, + roundCtx.SeqNr, + aq, + kvReadTxn, + NewRoundBlobBroadcastFetcher( + 
roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), + ) }, ) kvReadTxn.Discard() @@ -364,7 +380,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundObservation( case outgen.chLocalEvent <- EventComputedObservation[RI]{ roundCtx.Epoch, roundCtx.SeqNr, - aq.Query, + aq, observation, }: case <-ctx.Done(): @@ -389,7 +405,7 @@ func (outgen *outcomeGenerationState[RI]) eventComputedObservation(ev EventCompu return } - so, err := MakeSignedObservation(outgen.ID(), outgen.sharedState.seqNr, ev.Query, ev.Observation, outgen.offchainKeyring.OffchainSign) + so, err := MakeSignedObservation(outgen.ID(), outgen.sharedState.seqNr, ev.AttributedQuery, ev.Observation, outgen.offchainKeyring.OffchainSign) if err != nil { outgen.logger.Error("MakeSignedObservation returned error", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -398,7 +414,7 @@ func (outgen *outcomeGenerationState[RI]) eventComputedObservation(ev EventCompu return } - if err := so.Verify(outgen.ID(), outgen.sharedState.seqNr, ev.Query, outgen.offchainKeyring.OffchainPublicKey()); err != nil { + if err := so.Verify(outgen.ID(), outgen.sharedState.seqNr, ev.AttributedQuery, outgen.offchainKeyring.OffchainPublicKey()); err != nil { outgen.logger.Error("MakeSignedObservation produced invalid signature", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, "error": err, @@ -490,7 +506,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessProposalPool() { outgen.followerState.phase = outgenFollowerPhaseBackgroundProposalStateTransition - kvReadWriteTxn, err := outgen.kvStore.NewReadWriteTransaction(outgen.sharedState.seqNr) + kvReadWriteTxn, err := outgen.kvDb.NewSerializedReadWriteTransaction(outgen.sharedState.seqNr) if err != nil { outgen.logger.Warn("could not create kv read/write transaction", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -520,6 +536,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessProposalPool() { StateTransitionInputsDigest{}, nil, + 
StateRootDigest{}, nil, aq, @@ -537,12 +554,13 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalStateTransition( roundCtx RoundContext, stateTransitionInputsDigest StateTransitionInputsDigest, - writeSet []KeyValuePair, + writeSet []KeyValuePairWithDeletions, + stateRootDigest StateRootDigest, reportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor, aq types.AttributedQuery, asos []AttributedSignedObservation, - kvReadWriteTxn KeyValueStoreReadWriteTransaction, + kvReadWriteTxn KeyValueDatabaseReadWriteTransaction, ) { shouldDiscardKVTxn := true defer func() { @@ -553,7 +571,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalStateTransition( if asos != nil { - aos, ok := outgen.checkAttributedSignedObservations(ctx, logger, ogid, roundCtx, aq, asos, kvReadWriteTxn) + aos, ok := outgen.backgroundCheckAttributedSignedObservations(ctx, logger, ogid, roundCtx, aq, asos, kvReadWriteTxn) if !ok { return } @@ -564,7 +582,17 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalStateTransition( 0, // StateTransition is a pure function and should finish "instantly" roundCtx, func(ctx context.Context, roundCtx RoundContext) (ocr3_1types.ReportsPlusPrecursor, error) { - return outgen.reportingPlugin.StateTransition(ctx, roundCtx.SeqNr, aq, aos, kvReadWriteTxn, outgen.blobBroadcastFetcher) + return outgen.reportingPlugin.StateTransition( + ctx, + roundCtx.SeqNr, + aq, + aos, + kvReadWriteTxn, + NewRoundBlobBroadcastFetcher( + roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), + ) }, ) if !ok { @@ -574,7 +602,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalStateTransition( stateTransitionInputsDigest = MakeStateTransitionInputsDigest( ogid, roundCtx.SeqNr, - aq.Query, + aq, aos, ) @@ -587,23 +615,31 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalStateTransition( }) return } + stateRootDigest, err = kvReadWriteTxn.CloseWriteSet() + if err != nil { + outgen.logger.Warn("failed to close the transaction WriteSet", 
commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + "error": err, + }) + return + } } else { // apply write set instead of executing StateTransition - for _, m := range writeSet { - var err error - if m.Deleted { - err = kvReadWriteTxn.Delete(m.Key) - } else { - err = kvReadWriteTxn.Write(m.Key, m.Value) - } - if err != nil { - logger.Error("failed to write write-set modification", commontypes.LogFields{ - "seqNr": outgen.sharedState.seqNr, - "error": err, - }) - return - } + localStateRootDigest, err := kvReadWriteTxn.ApplyWriteSet(writeSet) + if err != nil { + outgen.logger.Warn("failed to apply write set to kv read/write transaction", commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + "error": err, + }) + return + } + if localStateRootDigest != stateRootDigest { + logger.Error("StateRootDigest mismatch", commontypes.LogFields{ + "localStateRootDigest": localStateRootDigest, + "receivedStateRootDigest": stateRootDigest, + }) + return } } @@ -621,6 +657,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundProposalStateTransition( stateTransitionInputsDigest, stateTransitionOutputs, stateTransitionOutputDigest, + stateRootDigest, reportsPlusPrecursor, reportsPlusPrecursorDigest, }, @@ -648,13 +685,14 @@ func (outgen *outcomeGenerationState[RI]) eventComputedProposalStateTransition(e return } - outgen.followerState.openKVTxn = ev.KeyValueStoreReadWriteTransaction + outgen.followerState.openKVTxn = ev.KeyValueDatabaseReadWriteTransaction prepareSignature, err := MakePrepareSignature( outgen.ID(), outgen.sharedState.seqNr, ev.stateTransitionInfo.InputsDigest, ev.stateTransitionInfo.OutputDigest, + ev.stateTransitionInfo.StateRootDigest, ev.stateTransitionInfo.ReportsPlusPrecursorDigest, outgen.offchainKeyring.OffchainSign, ) @@ -739,6 +777,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessPreparePool() { outgen.sharedState.seqNr, outgen.followerState.stateTransitionInfo.InputsDigest, 
outgen.followerState.stateTransitionInfo.OutputDigest, + outgen.followerState.stateTransitionInfo.StateRootDigest, outgen.followerState.stateTransitionInfo.ReportsPlusPrecursorDigest, outgen.config.OracleIdentities[sender].OffchainPublicKey, ) @@ -775,6 +814,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessPreparePool() { outgen.sharedState.seqNr, outgen.followerState.stateTransitionInfo.InputsDigest, outgen.followerState.stateTransitionInfo.OutputDigest, + outgen.followerState.stateTransitionInfo.StateRootDigest, outgen.followerState.stateTransitionInfo.ReportsPlusPrecursorDigest, outgen.offchainKeyring.OffchainSign, ) @@ -791,6 +831,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessPreparePool() { outgen.sharedState.seqNr, outgen.followerState.stateTransitionInfo.InputsDigest, outgen.followerState.stateTransitionInfo.Outputs, + outgen.followerState.stateTransitionInfo.StateRootDigest, outgen.followerState.stateTransitionInfo.ReportsPlusPrecursor, prepareQuorumCertificate, }) { @@ -869,6 +910,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessCommitPool() { outgen.sharedState.seqNr, outgen.followerState.stateTransitionInfo.InputsDigest, outgen.followerState.stateTransitionInfo.OutputDigest, + outgen.followerState.stateTransitionInfo.StateRootDigest, outgen.followerState.stateTransitionInfo.ReportsPlusPrecursorDigest, outgen.config.OracleIdentities[sender].OffchainPublicKey, ) @@ -909,6 +951,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessCommitPool() { outgen.sharedState.seqNr, outgen.followerState.stateTransitionInfo.InputsDigest, outgen.followerState.stateTransitionInfo.Outputs, + outgen.followerState.stateTransitionInfo.StateRootDigest, outgen.followerState.stateTransitionInfo.ReportsPlusPrecursor, commitQuorumCertificate, }) @@ -932,7 +975,7 @@ func (outgen *outcomeGenerationState[RI]) tryProcessCommitPool() { }) { - kvSeqNr, err := outgen.kvStore.HighestCommittedSeqNr() + kvSeqNr, err := outgen.kvDb.HighestCommittedSeqNr() if err 
!= nil { outgen.logger.Error("failed to validate kv commit post-condition, upon kv commit failure", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -952,6 +995,95 @@ func (outgen *outcomeGenerationState[RI]) tryProcessCommitPool() { } } + kvReadTxn, err := outgen.kvDb.NewReadTransaction(outgen.sharedState.seqNr + 1) + if err != nil { + outgen.logger.Warn("skipping call to ReportingPlugin.Committed", commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + "error": err, + }) + outgen.completeRound() + return + } + + outgen.followerState.phase = outgenFollowerPhaseBackgroundCommitted + + { + ctx := outgen.epochCtx + logger := outgen.logger + roundCtx := RoundContext{ + outgen.sharedState.seqNr, + outgen.sharedState.e, + outgen.sharedState.seqNr - outgen.sharedState.firstSeqNrOfEpoch + 1, + } + kvReadTxn := kvReadTxn + outgen.subs.Go(func() { + outgen.backgroundCommitted( + ctx, + logger, + roundCtx, + kvReadTxn, + ) + }) + } +} + +func (outgen *outcomeGenerationState[RI]) backgroundCommitted( + ctx context.Context, + logger loghelper.LoggerWithContext, + roundCtx RoundContext, + kvReadTxn KeyValueDatabaseReadTransaction, +) { + _, ok := callPluginFromOutcomeGenerationBackground[error]( + ctx, + logger, + "Committed", + 0, // Committed is a pure function and should finish "instantly" + roundCtx, + func(ctx context.Context, roundCtx RoundContext) (error, error) { + return outgen.reportingPlugin.Committed(ctx, roundCtx.SeqNr, kvReadTxn), nil + }, + ) + kvReadTxn.Discard() + + if !ok { + outgen.logger.Info("continuing after ReportingPlugin.Committed returned an error", commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + }) + + } + + select { + case outgen.chLocalEvent <- EventComputedCommitted[RI]{ + roundCtx.Epoch, + roundCtx.SeqNr, + }: + case <-ctx.Done(): + } +} + +func (outgen *outcomeGenerationState[RI]) eventComputedCommitted(ev EventComputedCommitted[RI]) { + if ev.Epoch != outgen.sharedState.e || ev.SeqNr != outgen.sharedState.seqNr { 
+ outgen.logger.Debug("discarding EventComputedCommitted from old round", commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + "evEpoch": ev.Epoch, + "evSeqNr": ev.SeqNr, + }) + return + } + + if outgen.followerState.phase != outgenFollowerPhaseBackgroundCommitted { + outgen.logger.Debug("discarding EventComputedCommitted, wrong phase", commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + "phase": outgen.followerState.phase, + }) + return + } + + outgen.completeRound() +} + +func (outgen *outcomeGenerationState[RI]) completeRound() { + if uint64(outgen.config.RMax) <= outgen.sharedState.seqNr-outgen.sharedState.firstSeqNrOfEpoch+1 { outgen.logger.Debug("epoch has been going on for too long, sending EventChangeLeader to Pacemaker", commontypes.LogFields{ "firstSeqNrOfEpoch": outgen.sharedState.firstSeqNrOfEpoch, @@ -1053,6 +1185,7 @@ func (outgen *outcomeGenerationState[RI]) commit(commit CertifiedCommit) (persis commit.SeqNr(), commit.StateTransitionOutputs.WriteSet, ), + commit.StateRootDigest, reportsPlusPrecursor, commit.CommitQuorumCertificate, }, @@ -1091,25 +1224,89 @@ func (outgen *outcomeGenerationState[RI]) persistAndUpdateCertIfGreater(cert Cer return true } +func (outgen *outcomeGenerationState[RI]) backgroundCheckAttributedSignedObservation( + ctx context.Context, + logger loghelper.LoggerWithContext, + ogid OutcomeGenerationID, + roundCtx RoundContext, + aq types.AttributedQuery, + aso AttributedSignedObservation, + kvReader ocr3_1types.KeyValueStateReader, // we don't discard the kvReader in this function because it is managed further up the call stack +) bool { + if err := aso.SignedObservation.Verify(ogid, roundCtx.SeqNr, aq, outgen.config.OracleIdentities[aso.Observer].OffchainPublicKey); err != nil { + logger.Warn("dropping MessageProposal that contains signed observation with invalid signature", commontypes.LogFields{ + "seqNr": roundCtx.SeqNr, + "error": err, + }) + return false + } + + err, ok := 
callPluginFromOutcomeGenerationBackground[error]( + ctx, + logger, + "ValidateObservation", + + 0, // ValidateObservation is a pure function and should finish "instantly" + roundCtx, + func(ctx context.Context, roundCtx RoundContext) (error, error) { + return outgen.reportingPlugin.ValidateObservation( + ctx, + roundCtx.SeqNr, + aq, + types.AttributedObservation{ + aso.SignedObservation.Observation, + aso.Observer, + }, + kvReader, + NewRoundBlobBroadcastFetcher( + roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), + ), nil + }, + ) + + if !ok { + logger.Error("dropping MessageProposal containing observation that could not be validated", commontypes.LogFields{ + "seqNr": roundCtx.SeqNr, + "observer": aso.Observer, + }) + return false + } + + if err != nil { + logger.Warn("dropping MessageProposal that contains an invalid observation", commontypes.LogFields{ + "seqNr": roundCtx.SeqNr, + "error": err, + "observer": aso.Observer, + }) + return false + } + + return true +} + // If the attributed signed observations have valid signature, and they satisfy ValidateObservation // and ObservationQuorum plugin methods, this function returns the vector of corresponding // AttributedObservations and true. 
-func (outgen *outcomeGenerationState[RI]) checkAttributedSignedObservations( +func (outgen *outcomeGenerationState[RI]) backgroundCheckAttributedSignedObservations( ctx context.Context, logger loghelper.LoggerWithContext, ogid OutcomeGenerationID, roundCtx RoundContext, aq types.AttributedQuery, asos []AttributedSignedObservation, - kvReader ocr3_1types.KeyValueReader, + kvReader ocr3_1types.KeyValueStateReader, // we don't discard the kvReader in this function because it is managed further up the call stack ) ([]types.AttributedObservation, bool) { - attributedObservations := []types.AttributedObservation{} + attributedObservations := make([]types.AttributedObservation, 0, len(asos)) + + subs, allValidMutex, allValid := subprocesses.Subprocesses{}, sync.Mutex{}, true - seen := map[commontypes.OracleID]bool{} + myObservationIncluded := false - for _, aso := range asos { - if !(0 <= int(aso.Observer) && int(aso.Observer) <= outgen.config.N()) { + for i, aso := range asos { + if !(0 <= int(aso.Observer) && int(aso.Observer) < outgen.config.N()) { logger.Warn("dropping MessageProposal that contains signed observation with invalid observer", commontypes.LogFields{ "seqNr": roundCtx.SeqNr, "invalidObserver": aso.Observer, @@ -1117,63 +1314,35 @@ func (outgen *outcomeGenerationState[RI]) checkAttributedSignedObservations( return nil, false } - if seen[aso.Observer] { + if i > 0 && !(asos[i-1].Observer < aso.Observer) { logger.Warn("dropping MessageProposal that contains duplicate signed observation", commontypes.LogFields{ "seqNr": roundCtx.SeqNr, }) return nil, false } - seen[aso.Observer] = true - - if err := aso.SignedObservation.Verify(ogid, roundCtx.SeqNr, aq.Query, outgen.config.OracleIdentities[aso.Observer].OffchainPublicKey); err != nil { - logger.Warn("dropping MessageProposal that contains signed observation with invalid signature", commontypes.LogFields{ - "seqNr": roundCtx.SeqNr, - "error": err, - }) - return nil, false - } - - err, ok := 
callPluginFromOutcomeGenerationBackground[error]( - ctx, - logger, - "ValidateObservation", - 0, // ValidateObservation is a pure function and should finish "instantly" - roundCtx, - func(ctx context.Context, roundCtx RoundContext) (error, error) { - return outgen.reportingPlugin.ValidateObservation( - ctx, - roundCtx.SeqNr, - aq, - types.AttributedObservation{aso.SignedObservation.Observation, aso.Observer}, - kvReader, - outgen.blobBroadcastFetcher, - ), nil - }, - ) - // kvReader.Discard() must not happen here, because - // backgroundStateTransition (our caller) manages the lifecycle of the - // underlying transaction. - if !ok { - logger.Error("dropping MessageProposal containing observation that could not be validated", commontypes.LogFields{ - "seqNr": roundCtx.SeqNr, - "observer": aso.Observer, - }) - return nil, false - } - if err != nil { - logger.Warn("dropping MessageProposal that contains an invalid observation", commontypes.LogFields{ - "seqNr": roundCtx.SeqNr, - "error": err, - "observer": aso.Observer, - }) - return nil, false + if aso.Observer == outgen.id { + myObservationIncluded = true } attributedObservations = append(attributedObservations, types.AttributedObservation{ aso.SignedObservation.Observation, aso.Observer, }) + + subs.Go(func() { + if !outgen.backgroundCheckAttributedSignedObservation(ctx, logger, ogid, roundCtx, aq, aso, kvReader) { + allValidMutex.Lock() + allValid = false + allValidMutex.Unlock() + } + }) + } + + subs.Wait() + if !allValid { + // no need to log, since backgroundCheckAttributedSignedObservation will already have done so + return nil, false } observationQuorum, ok := callPluginFromOutcomeGenerationBackground[bool]( @@ -1183,7 +1352,17 @@ func (outgen *outcomeGenerationState[RI]) checkAttributedSignedObservations( 0, // ObservationQuorum is a pure function and should finish "instantly" roundCtx, func(ctx context.Context, roundCtx RoundContext) (bool, error) { - return outgen.reportingPlugin.ObservationQuorum(ctx, 
roundCtx.SeqNr, aq, attributedObservations, kvReader, outgen.blobBroadcastFetcher) + return outgen.reportingPlugin.ObservationQuorum( + ctx, + roundCtx.SeqNr, + aq, + attributedObservations, + kvReader, + NewRoundBlobBroadcastFetcher( + roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), + ) }, ) @@ -1198,7 +1377,7 @@ func (outgen *outcomeGenerationState[RI]) checkAttributedSignedObservations( return nil, false } - if seen[outgen.id] { + if myObservationIncluded { outgen.metrics.includedObservationsTotal.Inc() } @@ -1206,8 +1385,7 @@ func (outgen *outcomeGenerationState[RI]) checkAttributedSignedObservations( } func (outgen *outcomeGenerationState[RI]) persistCommitAsBlock(commit *CertifiedCommit) bool { - ctx := outgen.ctx - configDigest := outgen.config.ConfigDigest + seqNr := commit.SeqNr() astb := AttestedStateTransitionBlock{ StateTransitionBlock{ @@ -1215,51 +1393,61 @@ func (outgen *outcomeGenerationState[RI]) persistCommitAsBlock(commit *Certified seqNr, commit.StateTransitionInputsDigest, commit.StateTransitionOutputs, + commit.StateRootDigest, commit.ReportsPlusPrecursor, }, commit.CommitQuorumCertificate, } - werr := outgen.database.WriteAttestedStateTransitionBlock( - ctx, - configDigest, - seqNr, - astb, - ) - - if werr != nil { + tx, err := outgen.kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + outgen.logger.Error("error creating read transaction", commontypes.LogFields{ + "error": err, + }) + return false + } + defer tx.Discard() - astb, rerr := outgen.database.ReadAttestedStateTransitionBlock( - ctx, - configDigest, - seqNr, - ) - if astb.StateTransitionBlock.SeqNr() == seqNr && rerr == nil { - // already persisted by someone else - return true - } else { - outgen.logger.Error("error persisting commit as attested state transition block", commontypes.LogFields{ - "seqNr": seqNr, - "error": werr, - }) - return false - } - } else { - // persited now by us - outgen.logger.Trace("persisted block", commontypes.LogFields{ + err = 
tx.WriteAttestedStateTransitionBlock(seqNr, astb) + if err != nil { + outgen.logger.Error("error writing attested state transition block", commontypes.LogFields{ "seqNr": seqNr, + "error": err, }) - return false + } + + err = tx.Commit() + if err != nil { + outgen.logger.Error("error committing transaction", commontypes.LogFields{ + "error": err, + }) + return false } + + // persisted now + outgen.logger.Trace("persisted block", commontypes.LogFields{ + "seqNr": seqNr, + }) + return true } func (outgen *outcomeGenerationState[RI]) refreshCommittedSeqNrAndCert() { preRefreshCommittedSeqNr := outgen.sharedState.committedSeqNr - postRefreshCommittedSeqNr, err := outgen.kvStore.HighestCommittedSeqNr() + tx, err := outgen.kvDb.NewReadTransactionUnchecked() + if err != nil { + outgen.logger.Error("error creating read transaction", commontypes.LogFields{ + "error": err, + }) + return + } + defer tx.Discard() + + postRefreshCommittedSeqNr, err := tx.ReadHighestCommittedSeqNr() if err != nil { - outgen.logger.Error("kvStore.HighestCommittedSeqNr() failed during refresh", commontypes.LogFields{ + outgen.logger.Error("kvDb.HighestCommittedSeqNr() failed during refresh", commontypes.LogFields{ "preRefreshCommittedSeqNr": preRefreshCommittedSeqNr, "error": err, }) @@ -1277,7 +1465,7 @@ func (outgen *outcomeGenerationState[RI]) refreshCommittedSeqNrAndCert() { logger.Warn("last kv transaction commit failed, requesting state sync", nil) select { - case outgen.chOutcomeGenerationToStatePersistence <- EventStateSyncRequest[RI]{ + case outgen.chOutcomeGenerationToStateSync <- EventStateSyncRequest[RI]{ preRefreshCommittedSeqNr, }: case <-outgen.ctx.Done(): @@ -1289,20 +1477,14 @@ func (outgen *outcomeGenerationState[RI]) refreshCommittedSeqNrAndCert() { panic("") } - ctx := outgen.ctx - configDigest := outgen.config.ConfigDigest - astb, err := outgen.database.ReadAttestedStateTransitionBlock( - ctx, - configDigest, - postRefreshCommittedSeqNr, - ) + astb, err := 
tx.ReadAttestedStateTransitionBlock(postRefreshCommittedSeqNr) if err != nil { logger.Error("error reading attested state transition block during refresh", commontypes.LogFields{ "error": err, }) return } - if astb.StateTransitionBlock.SeqNr() == 0 { + if astb.StateTransitionBlock.SeqNr() == 0 { // The block does not exist in the database logger.Critical("assumption violation, attested state transition block for kv committed seq nr does not exist", nil) panic("") } @@ -1320,8 +1502,9 @@ func (outgen *outcomeGenerationState[RI]) refreshCommittedSeqNrAndCert() { stb.SeqNr(), stb.StateTransitionInputsDigest, stb.StateTransitionOutputs, + stb.StateRootDigest, stb.ReportsPlusPrecursor, - astb.AttributedSignatures, + astb.AttributedCommitSignatures, }) if !persistedBlockAndCert { diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_leader.go b/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_leader.go index d48fe44f..3f4065f6 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_leader.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/outcome_generation_leader.go @@ -1,7 +1,9 @@ package protocol import ( + "cmp" "context" + "slices" "time" "github.com/smartcontractkit/libocr/commontypes" @@ -185,7 +187,14 @@ func (outgen *outcomeGenerationState[RI]) messageEpochStartRequest(msg MessageEp outgen.sharedState.firstSeqNrOfEpoch = outgen.sharedState.committedSeqNr + 1 outgen.startSubsequentLeaderRound() } else if commitQC, ok := epochStartProof.HighestCertified.(*CertifiedCommit); ok { - outgen.commit(*commitQC) + + if commitQC.SeqNr() != outgen.sharedState.committedSeqNr { + outgen.logger.Critical("assumption violation, we should have already committed the seqNr of the commitQC", commontypes.LogFields{ + "seqNr": outgen.sharedState.seqNr, + "commitSeqNr": commitQC.SeqNr(), + }) + panic("") + } outgen.sharedState.firstSeqNrOfEpoch = outgen.sharedState.committedSeqNr + 1 
outgen.startSubsequentLeaderRound() } else { @@ -231,7 +240,7 @@ func (outgen *outcomeGenerationState[RI]) startSubsequentLeaderRound() { ctx := outgen.epochCtx logger := outgen.logger roundCtx := outgen.RoundCtx(outgen.sharedState.committedSeqNr + 1) - kvReadTxn, err := outgen.kvStore.NewReadTransaction(roundCtx.SeqNr) + kvReadTxn, err := outgen.kvDb.NewReadTransaction(roundCtx.SeqNr) if err != nil { outgen.logger.Warn("failed to create new transaction, aborting startSubsequentLeaderRound", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -249,7 +258,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundQuery( ctx context.Context, logger loghelper.LoggerWithContext, roundCtx RoundContext, - kvReadTxn KeyValueStoreReadTransaction, + kvReadTxn KeyValueDatabaseReadTransaction, ) { query, ok := callPluginFromOutcomeGenerationBackground[types.Query]( ctx, @@ -258,7 +267,15 @@ func (outgen *outcomeGenerationState[RI]) backgroundQuery( outgen.config.MaxDurationQuery, roundCtx, func(ctx context.Context, outctx RoundContext) (types.Query, error) { - return outgen.reportingPlugin.Query(ctx, roundCtx.SeqNr, kvReadTxn, outgen.blobBroadcastFetcher) + return outgen.reportingPlugin.Query( + ctx, + roundCtx.SeqNr, + kvReadTxn, + NewRoundBlobBroadcastFetcher( + roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), + ) }, ) kvReadTxn.Discard() @@ -369,7 +386,7 @@ func (outgen *outcomeGenerationState[RI]) messageObservation(msg MessageObservat outgen.leaderState.query, outgen.sharedState.l, } - kvReadTxn, err := outgen.kvStore.NewReadTransaction(roundCtx.SeqNr) + kvReadTxn, err := outgen.kvDb.NewReadTransaction(roundCtx.SeqNr) if err != nil { outgen.logger.Warn("failed to create new transaction, aborting messageObservation", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -391,12 +408,12 @@ func (outgen *outcomeGenerationState[RI]) backgroundVerifyValidateObservation( sender commontypes.OracleID, signedObservation SignedObservation, aq 
types.AttributedQuery, - kvReadTxn KeyValueStoreReadTransaction, + kvReadTxn KeyValueDatabaseReadTransaction, ) { if err := signedObservation.Verify( ogid, roundCtx.SeqNr, - aq.Query, + aq, outgen.config.OracleIdentities[sender].OffchainPublicKey, ); err != nil { logger.Warn("dropping MessageObservation carrying invalid SignedObservation", commontypes.LogFields{ @@ -417,9 +434,15 @@ func (outgen *outcomeGenerationState[RI]) backgroundVerifyValidateObservation( return outgen.reportingPlugin.ValidateObservation(ctx, roundCtx.SeqNr, aq, - types.AttributedObservation{signedObservation.Observation, sender}, + types.AttributedObservation{ + signedObservation.Observation, + sender, + }, kvReadTxn, - outgen.blobBroadcastFetcher, + NewRoundBlobBroadcastFetcher( + roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), ), nil }, ) @@ -491,7 +514,7 @@ func (outgen *outcomeGenerationState[RI]) eventComputedValidateVerifyObservation } aos = append(aos, types.AttributedObservation{observationPoolEntry.Item.Observation, sender}) } - kvReadTxn, err := outgen.kvStore.NewReadTransaction(outctx.SeqNr) + kvReadTxn, err := outgen.kvDb.NewReadTransaction(outctx.SeqNr) if err != nil { outgen.logger.Warn("failed to create new transaction, aborting eventComputedValidateVerifyObservation", commontypes.LogFields{ "seqNr": outgen.sharedState.seqNr, @@ -519,7 +542,7 @@ func (outgen *outcomeGenerationState[RI]) backgroundObservationQuorum( roundCtx RoundContext, aq types.AttributedQuery, aos []types.AttributedObservation, - kvReadTxn KeyValueStoreReadTransaction, + kvReadTxn KeyValueDatabaseReadTransaction, ) { observationQuorum, ok := callPluginFromOutcomeGenerationBackground[bool]( ctx, @@ -528,7 +551,17 @@ func (outgen *outcomeGenerationState[RI]) backgroundObservationQuorum( 0, // ObservationQuorum is a pure function and should finish "instantly" roundCtx, func(ctx context.Context, roundCtx RoundContext) (bool, error) { - return outgen.reportingPlugin.ObservationQuorum(ctx, roundCtx.SeqNr, aq, 
aos, kvReadTxn, outgen.blobBroadcastFetcher) + return outgen.reportingPlugin.ObservationQuorum( + ctx, + roundCtx.SeqNr, + aq, + aos, + kvReadTxn, + NewRoundBlobBroadcastFetcher( + roundCtx.SeqNr, + outgen.blobBroadcastFetcher, + ), + ) }, ) kvReadTxn.Discard() @@ -591,14 +624,21 @@ func (outgen *outcomeGenerationState[RI]) eventTGraceTimeout() { }) return } + asos := make([]AttributedSignedObservation, 0, outgen.config.N()) - contributors := make([]commontypes.OracleID, 0, outgen.config.N()) for sender, observationPoolEntry := range outgen.leaderState.observationPool.Entries(outgen.sharedState.seqNr) { if observationPoolEntry.Verified == nil || !*observationPoolEntry.Verified { continue } asos = append(asos, AttributedSignedObservation{SignedObservation: observationPoolEntry.Item, Observer: sender}) - contributors = append(contributors, sender) + } + slices.SortFunc(asos, func(aso1, aso2 AttributedSignedObservation) int { + return cmp.Compare(aso1.Observer, aso2.Observer) + }) + + contributors := make([]commontypes.OracleID, 0, len(asos)) + for _, aso := range asos { + contributors = append(contributors, aso.Observer) } outgen.leaderState.phase = outgenLeaderPhaseSentProposal diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/queue/queue.go b/offchainreporting2plus/internal/ocr3_1/protocol/queue/queue.go deleted file mode 100644 index 9a521740..00000000 --- a/offchainreporting2plus/internal/ocr3_1/protocol/queue/queue.go +++ /dev/null @@ -1,68 +0,0 @@ -package queue - -type Queue[T any] struct { - elements []T - maxCapacity *int -} - -// NewQueue returns a queue with infinite maxCapacity -func NewQueue[T any]() *Queue[T] { - return &Queue[T]{ - elements: make([]T, 0), - } -} - -// NewQueueWithMaxCapacity returns queue with maxCapacity cap. -// If the maxCapacity is reached the queue does not accept more elements. 
-func NewQueueWithMaxCapacity[T any](cap int) *Queue[T] { - return &Queue[T]{ - elements: make([]T, 0), - maxCapacity: &cap, - } -} - -func (q *Queue[T]) IsEmpty() bool { - return len(q.elements) == 0 -} - -func (q *Queue[T]) Size() int { - return len(q.elements) -} - -// Push returns false if the queue is at maxCapacity and the element is not added -func (q *Queue[T]) Push(element T) bool { - if q.maxCapacity == nil || len(q.elements) < *q.maxCapacity { - q.elements = append(q.elements, element) - return true - } - return false -} - -// Peek returns the first element without removing it. It returns false if the queue is empty. -func (q *Queue[T]) Peek() (*T, bool) { - if len(q.elements) == 0 { - return nil, false - } - return &q.elements[0], true -} - -// Pop returns the first element after removing it. It returns false if the queue is empty. -func (q *Queue[T]) Pop() (T, bool) { - if len(q.elements) == 0 { - var zero T - return zero, false - } - first := q.elements[0] - - q.elements = q.elements[1:len(q.elements)] - return first, true -} - -// PeekLast returns the last element without removing it. It returns false if the queue is empty. 
-func (q *Queue[T]) PeekLast() (T, bool) { - if len(q.elements) == 0 { - var zero T - return zero, false - } - return q.elements[len(q.elements)-1], true -} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/report_attestation.go b/offchainreporting2plus/internal/ocr3_1/protocol/report_attestation.go index 6145077d..f33a7443 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/report_attestation.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/report_attestation.go @@ -26,7 +26,7 @@ func RunReportAttestation[RI any]( chNetToReportAttestation <-chan MessageToReportAttestationWithSender[RI], chOutcomeGenerationToReportAttestation <-chan EventToReportAttestation[RI], - chReportAttestationToStatePersistence chan<- EventToStatePersistence[RI], + chReportAttestationToStateSync chan<- EventToStateSync[RI], chReportAttestationToTransmission chan<- EventToTransmission[RI], config ocr3config.SharedConfig, contractTransmitter ocr3types.ContractTransmitter[RI], @@ -40,7 +40,7 @@ func RunReportAttestation[RI any]( newReportAttestationState(ctx, chNetToReportAttestation, chOutcomeGenerationToReportAttestation, - chReportAttestationToStatePersistence, chReportAttestationToTransmission, + chReportAttestationToStateSync, chReportAttestationToTransmission, config, contractTransmitter, logger, netSender, onchainKeyring, reportingPlugin, sched).run() } @@ -59,7 +59,7 @@ type reportAttestationState[RI any] struct { chNetToReportAttestation <-chan MessageToReportAttestationWithSender[RI] chOutcomeGenerationToReportAttestation <-chan EventToReportAttestation[RI] - chReportAttestationToStatePersistence chan<- EventToStatePersistence[RI] + chReportAttestationToStateSync chan<- EventToStateSync[RI] chReportAttestationToTransmission chan<- EventToTransmission[RI] config ocr3config.SharedConfig contractTransmitter ocr3types.ContractTransmitter[RI] @@ -372,7 +372,7 @@ func (repatt *reportAttestationState[RI]) tryComplete(seqNr uint64) { 
repatt.rounds[seqNr].startedFetch = true repatt.scheduler.ScheduleDelay(EventMissingOutcome[RI]{seqNr}, repatt.config.DeltaCertifiedCommitRequest) select { - case repatt.chReportAttestationToStatePersistence <- EventStateSyncRequest[RI]{seqNr}: + case repatt.chReportAttestationToStateSync <- EventStateSyncRequest[RI]{seqNr}: case <-repatt.ctx.Done(): } } @@ -709,7 +709,7 @@ func newReportAttestationState[RI any]( chNetToReportAttestation <-chan MessageToReportAttestationWithSender[RI], chOutcomeGenerationToReportAttestation <-chan EventToReportAttestation[RI], - chReportAttestationToStatePersistence chan<- EventToStatePersistence[RI], + chReportAttestationToStateSync chan<- EventToStateSync[RI], chReportAttestationToTransmission chan<- EventToTransmission[RI], config ocr3config.SharedConfig, contractTransmitter ocr3types.ContractTransmitter[RI], @@ -725,7 +725,7 @@ func newReportAttestationState[RI any]( chNetToReportAttestation, chOutcomeGenerationToReportAttestation, - chReportAttestationToStatePersistence, + chReportAttestationToStateSync, chReportAttestationToTransmission, config, contractTransmitter, diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/requester_gadget.go b/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/requester_gadget.go new file mode 100644 index 00000000..5075a5d5 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/requester_gadget.go @@ -0,0 +1,261 @@ +package requestergadget + +import ( + "cmp" + "maps" + "slices" + "time" + + "github.com/smartcontractkit/libocr/commontypes" +) + +type RequestInfo struct { + // If no response is received by this time, the request is considered timed out. 
+ ExpiryTimestamp time.Time +} + +func NewRequesterGadget[Item comparable]( + n int, + requestInterval time.Duration, // Wait interval between requests to the same seeder + sendRequestFn func(Item, commontypes.OracleID) (*RequestInfo, bool), // Invoked by the RequesterGadget to send a request for the given item to the given seeder. + getPendingItemsFn func() []Item, // Invoked by the RequesterGadget to get the list of items that should be requested. RequesterGadget will attempt to request items earlier in the list first. + getSeedersFn func(Item) map[commontypes.OracleID]struct{}, // Invoked by the RequesterGadget to get the list of seeders that can serve the given item. +) *RequesterGadget[Item] { + oracles := make(map[commontypes.OracleID]*oracleState, n) + for i := range n { + oracles[commontypes.OracleID(i)] = &oracleState{ + time.Time{}, + 0, + } + } + return &RequesterGadget[Item]{ + oracles, + requestInterval, + make(map[Item]*pendingItemState), + time.After(0), + sendRequestFn, + getPendingItemsFn, + getSeedersFn, + } +} + +// PleaseRecheckPendingItems must be called by the protocol when the output of +// getPendingItemsFn or getSeedersFn has changed. +func (rg *RequesterGadget[Item]) PleaseRecheckPendingItems() { + rg.chTick = time.After(0) +} + +// CheckAndMarkResponse must be called by the protocol when a response is +// received, to ensure that the response matches a request that the gadget has +// sent. It will return true even if the request has technically timed out in +// some cases. 
+func (rg *RequesterGadget[Item]) CheckAndMarkResponse(item Item, sender commontypes.OracleID) bool { + rg.PleaseRecheckPendingItems() // overly sensitive, but easier to reason about + if pendingItem, ok := rg.ourPendingItems[item]; ok { + if pendingItem.pendingRequestOrNil == nil { + return false + } + pendingRequest := pendingItem.pendingRequestOrNil + if pendingRequest.seeder == sender { + pendingItem.pendingRequestOrNil = nil + return true + } + } + return false +} + +// We temporarily exclude a responder for an item when they time out, send a go +// away, or send a bad response. We clear the exclusion list when we've excluded +// them all but still haven't received the item. +func (rg *RequesterGadget[Item]) temporaryExcludeResponderForItem(item Item, sender commontypes.OracleID) { + if pendingItem, ok := rg.ourPendingItems[item]; ok { + pendingItem.temporarilyExcludedSeeders[sender] = struct{}{} + } +} + +func (rg *RequesterGadget[Item]) MarkGoAwayResponse(item Item, sender commontypes.OracleID) { + rg.temporaryExcludeResponderForItem(item, sender) +} + +func (rg *RequesterGadget[Item]) MarkGoodResponder(sender commontypes.OracleID) { + rg.oracles[sender].score++ +} + +func (rg *RequesterGadget[Item]) MarkGoodResponse(_ Item, sender commontypes.OracleID) { + rg.MarkGoodResponder(sender) +} + +func (rg *RequesterGadget[Item]) MarkBadResponder(sender commontypes.OracleID) { + rg.oracles[sender].score /= 2 +} + +func (rg *RequesterGadget[Item]) MarkBadResponse(item Item, sender commontypes.OracleID) { + rg.temporaryExcludeResponderForItem(item, sender) + rg.MarkBadResponder(sender) +} + +// Only called by the requester gadget itself. The protocol using this gadget +// has no way of knowing a request was sent or timed out. 
+func (rg *RequesterGadget[Item]) markTimedOutResponse(item Item, sender commontypes.OracleID) { + rg.MarkBadResponse(item, sender) +} + +func (rg *RequesterGadget[Item]) rankedSeeders(seeders map[commontypes.OracleID]struct{}, excluded map[commontypes.OracleID]struct{}) []commontypes.OracleID { + type scoredSeeder struct { + seeder commontypes.OracleID + score uint64 + } + scoredSeeders := make([]scoredSeeder, 0, len(seeders)) + for seeder := range seeders { + if _, ok := excluded[seeder]; ok { + continue + } + scoredSeeders = append(scoredSeeders, scoredSeeder{ + seeder, + rg.oracles[seeder].score, + }) + } + slices.SortFunc(scoredSeeders, func(a, b scoredSeeder) int { + // higher score goes first + return cmp.Compare(b.score, a.score) + }) + + ranks := make([]commontypes.OracleID, 0, len(scoredSeeders)) + for _, scoredSeeder := range scoredSeeders { + ranks = append(ranks, scoredSeeder.seeder) + } + return shuffle(ranks) +} + +func (rg *RequesterGadget[Item]) Ticker() <-chan time.Time { + return rg.chTick +} + +const maxNextTickInterval = 15 * time.Second + +func (rg *RequesterGadget[Item]) Tick() { + + now := time.Now() + + pendingItems := rg.getPendingItemsFn() + // Discard any pending requests for no longer needed items. + maps.DeleteFunc(rg.ourPendingItems, func(item Item, _ *pendingItemState) bool { + return !slices.Contains(pendingItems, item) + }) + + nextTick := now.Add(maxNextTickInterval) + + for _, item := range pendingItems { + // Add state for this item if we didn't have it before. + if _, ok := rg.ourPendingItems[item]; !ok { + rg.ourPendingItems[item] = &pendingItemState{ + nil, + make(map[commontypes.OracleID]struct{}), + } + } + pendingItemState := rg.ourPendingItems[item] + pendingRequestOrNil := pendingItemState.pendingRequestOrNil + + var shouldRequestNow bool + if pendingRequestOrNil != nil { + pendingRequest := pendingRequestOrNil + if pendingRequest.expiryTimestamp.Before(now) { + // Previous request timed out. 
+ rg.markTimedOutResponse(item, pendingRequest.seeder) + shouldRequestNow = true + } + } else { + shouldRequestNow = true + } + + if !shouldRequestNow { + continue + } + + seeders := rg.getSeedersFn(item) + rankedNonExcludedSeeders := rg.rankedSeeders(seeders, pendingItemState.temporarilyExcludedSeeders) + // If we have no remaining seeders because we have excluded all of them, + // clear the exclusion list. We still need to make progress fetching the + // thing, and we could have excluded the oracles due to a transient + // issue on our end even. + if len(rankedNonExcludedSeeders) == 0 && len(seeders) != 0 { + clear(pendingItemState.temporarilyExcludedSeeders) + rankedNonExcludedSeeders = rg.rankedSeeders(seeders, pendingItemState.temporarilyExcludedSeeders) + } + + for _, seeder := range rankedNonExcludedSeeders { + if rg.oracles[seeder].nextPossibleSendTimestamp.After(now) { + continue + } + + // try sending to this oracle + requestInfo, ok := rg.sendRequestFn(item, seeder) + if !ok { + continue + } + + rg.oracles[seeder].nextPossibleSendTimestamp = now.Add(rg.requestInterval) + pendingItemState.pendingRequestOrNil = &pendingRequest{ + seeder, + requestInfo.ExpiryTimestamp, + } + break + } + + var nextTickForThisRequest time.Time + if pendingItemState.pendingRequestOrNil != nil { + // We sent a request in this tick, but want to recheck in case of + // timeout. + nextTickForThisRequest = pendingItemState.pendingRequestOrNil.expiryTimestamp + } else if len(rankedNonExcludedSeeders) > 0 { + // We didn't manage to send a request in this tick, so want to send + // a request in the next tick, preferably to the best ranked seeder. + // No guarantee we'll be able to get to them first though, prior + // pending requests in the list will have priority. 
+ nextTickForThisRequest = rg.oracles[rankedNonExcludedSeeders[0]].nextPossibleSendTimestamp // <= now.Add(rg.requestInterval) + } + + if nextTickForThisRequest.Before(nextTick) { + nextTick = nextTickForThisRequest + } + } + + rg.chTick = time.After(time.Until(nextTick)) +} + +// A RequesterGadget helps us track and send requests for some data +// from a set of *seeders* that may or may not be able to serve the requests. +// Seeders may be byzantine, crashed, or just slow. +// +// Not thread-safe. RequesterGadget is expected to be integrated into a single subprotocol +// event loop via selecting on Ticker() and then calling Tick(). +// +// Response processing is not handled by RequesterGadget. It is the responsibility +// of the subprotocol integrating RequesterGadget. After a response is received, the subprotocol must call +// CheckAndMarkResponse. It should also call one of MarkGoAwayResponse, MarkGoodResponse, +// MarkBadResponse, MarkGoodResponder, MarkBadResponder once the response has been processed. 
+type RequesterGadget[Item comparable] struct { + oracles map[commontypes.OracleID]*oracleState + requestInterval time.Duration + ourPendingItems map[Item]*pendingItemState + chTick <-chan time.Time + + sendRequestFn func(Item, commontypes.OracleID) (*RequestInfo, bool) + getPendingItemsFn func() []Item + getSeedersFn func(Item) map[commontypes.OracleID]struct{} +} + +type pendingItemState struct { + pendingRequestOrNil *pendingRequest + temporarilyExcludedSeeders map[commontypes.OracleID]struct{} +} + +type pendingRequest struct { + seeder commontypes.OracleID + expiryTimestamp time.Time +} + +type oracleState struct { + nextPossibleSendTimestamp time.Time + score uint64 +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/shuffle.go b/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/shuffle.go new file mode 100644 index 00000000..35089a75 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget/shuffle.go @@ -0,0 +1,52 @@ +package requestergadget + +import ( + "math" + "math/rand" + + "github.com/smartcontractkit/libocr/commontypes" +) + +func shuffle(ranks []commontypes.OracleID) []commontypes.OracleID { + + return softmaxShuffle(ranks, math.Ln2) +} + +func softmaxShuffle[T any](ranks []T, alpha float64) []T { + n := len(ranks) + // precompute weights + weights := make([]float64, n) + for i := range n { + weights[i] = math.Exp(-alpha * float64(i)) + } + + shuffled := make([]T, 0, n) + used := make([]bool, n) + + for len(shuffled) < n { + // compute total weight of unused + total := 0.0 + for i, w := range weights { + if !used[i] { + total += w + } + } + + // sample + r := rand.Float64() * total + acc := 0.0 + for i, w := range weights { + if used[i] { + continue + } + acc += w + if r <= acc { + used[i] = true + shuffled = append(shuffled, ranks[i]) + break + } + } + } + + return shuffled +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/signed_data.go 
b/offchainreporting2plus/internal/ocr3_1/protocol/signed_data.go index a98ec216..838a2942 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/signed_data.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/signed_data.go @@ -15,18 +15,19 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) -// Returns a byte slice whose first four bytes are the string "ocr3" and the rest -// of which is the sum returned by h. Used for domain separation vs ocr2, where -// we just directly sign sha256 hashes. +// Returns a byte slice whose first six bytes are the string "ocr3.1" and the rest +// of which is the sum returned by h. Used for domain separation as per the comment +// on offchainreporting2plus/types.OffchainKeyring. // // Any signatures made with the OffchainKeyring should use ocr3_1DomainSeparatedSum! func ocr3_1DomainSeparatedSum(h hash.Hash) []byte { - result := make([]byte, 0, 6+32) - result = append(result, []byte("ocr3.1")...) + const domainSeparator = "ocr3.1" + result := make([]byte, 0, len(domainSeparator)+sha256.Size) + result = append(result, []byte(domainSeparator)...) 
return h.Sum(result) } -const signedObservationDomainSeparator = "ocr3.1 SignedObservation" +const signedObservationDomainSeparator = "ocr3.1/SignedObservation/" type SignedObservation struct { Observation types.Observation @@ -36,14 +37,14 @@ type SignedObservation struct { func MakeSignedObservation( ogid OutcomeGenerationID, seqNr uint64, - query types.Query, + aq types.AttributedQuery, observation types.Observation, signer func(msg []byte) (sig []byte, err error), ) ( SignedObservation, error, ) { - payload := signedObservationMsg(ogid, seqNr, query, observation) + payload := signedObservationMsg(ogid, seqNr, aq, observation) sig, err := signer(payload) if err != nil { return SignedObservation{}, err @@ -51,14 +52,14 @@ func MakeSignedObservation( return SignedObservation{observation, sig}, nil } -func (so SignedObservation) Verify(ogid OutcomeGenerationID, seqNr uint64, query types.Query, publicKey types.OffchainPublicKey) error { +func (so SignedObservation) Verify(ogid OutcomeGenerationID, seqNr uint64, aq types.AttributedQuery, publicKey types.OffchainPublicKey) error { pk := ed25519.PublicKey(publicKey[:]) // should never trigger since types.OffchainPublicKey is an array with length ed25519.PublicKeySize if len(pk) != ed25519.PublicKeySize { return fmt.Errorf("ed25519 public key size mismatch, expected %v but got %v", ed25519.PublicKeySize, len(pk)) } - ok := ed25519.Verify(pk, signedObservationMsg(ogid, seqNr, query, so.Observation), so.Signature) + ok := ed25519.Verify(pk, signedObservationMsg(ogid, seqNr, aq, so.Observation), so.Signature) if !ok { return fmt.Errorf("SignedObservation has invalid signature") } @@ -66,7 +67,7 @@ func (so SignedObservation) Verify(ogid OutcomeGenerationID, seqNr uint64, query return nil } -func signedObservationMsg(ogid OutcomeGenerationID, seqNr uint64, query types.Query, observation types.Observation) []byte { +func signedObservationMsg(ogid OutcomeGenerationID, seqNr uint64, attributedQuery types.AttributedQuery, 
observation types.Observation) []byte { h := sha256.New() _, _ = h.Write([]byte(signedObservationDomainSeparator)) @@ -79,9 +80,12 @@ func signedObservationMsg(ogid OutcomeGenerationID, seqNr uint64, query types.Qu _, _ = h.Write(ogid.ConfigDigest[:]) _ = binary.Write(h, binary.BigEndian, seqNr) - // query - _ = binary.Write(h, binary.BigEndian, uint64(len(query))) - _, _ = h.Write(query) + // attributedQuery.Query + _ = binary.Write(h, binary.BigEndian, uint64(len(attributedQuery.Query))) + _, _ = h.Write(attributedQuery.Query) + + // attributedQuery.Proposer + _ = binary.Write(h, binary.BigEndian, uint64(attributedQuery.Proposer)) // observation _ = binary.Write(h, binary.BigEndian, uint64(len(observation))) @@ -100,7 +104,7 @@ type StateTransitionInputsDigest [32]byte func MakeStateTransitionInputsDigest( ogid OutcomeGenerationID, seqNr uint64, - query types.Query, + attributedQuery types.AttributedQuery, attributedObservations []types.AttributedObservation, ) StateTransitionInputsDigest { h := sha256.New() @@ -110,8 +114,10 @@ func MakeStateTransitionInputsDigest( _ = binary.Write(h, binary.BigEndian, seqNr) - _ = binary.Write(h, binary.BigEndian, uint64(len(query))) - _, _ = h.Write(query) + _ = binary.Write(h, binary.BigEndian, uint64(len(attributedQuery.Query))) + _, _ = h.Write(attributedQuery.Query) + + _ = binary.Write(h, binary.BigEndian, uint64(attributedQuery.Proposer)) _ = binary.Write(h, binary.BigEndian, uint64(len(attributedObservations))) for _, ao := range attributedObservations { @@ -129,7 +135,7 @@ func MakeStateTransitionInputsDigest( type StateTransitionOutputDigest [32]byte -func MakeStateTransitionOutputDigest(ogid OutcomeGenerationID, seqNr uint64, output []KeyValuePair) StateTransitionOutputDigest { +func MakeStateTransitionOutputDigest(ogid OutcomeGenerationID, seqNr uint64, output []KeyValuePairWithDeletions) StateTransitionOutputDigest { h := sha256.New() _, _ = h.Write(ogid.ConfigDigest[:]) @@ -170,7 +176,7 @@ func 
MakeReportsPlusPrecursorDigest(ogid OutcomeGenerationID, seqNr uint64, prec return result } -const prepareSignatureDomainSeparator = "ocr3.1 PrepareSignature" +const prepareSignatureDomainSeparator = "ocr3.1/PrepareSignature/" type PrepareSignature []byte @@ -179,10 +185,11 @@ func MakePrepareSignature( seqNr uint64, inputsDigest StateTransitionInputsDigest, outputDigest StateTransitionOutputDigest, + rootDigest StateRootDigest, reportsPlusPrecursorDigest ReportsPlusPrecursorDigest, signer func(msg []byte) ([]byte, error), ) (PrepareSignature, error) { - return signer(prepareSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, reportsPlusPrecursorDigest)) + return signer(prepareSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, rootDigest, reportsPlusPrecursorDigest)) } func (sig PrepareSignature) Verify( @@ -190,6 +197,7 @@ func (sig PrepareSignature) Verify( seqNr uint64, inputsDigest StateTransitionInputsDigest, outputDigest StateTransitionOutputDigest, + rootDigest StateRootDigest, reportsPlusPrecursorDigest ReportsPlusPrecursorDigest, publicKey types.OffchainPublicKey, ) error { @@ -198,8 +206,8 @@ func (sig PrepareSignature) Verify( if len(pk) != ed25519.PublicKeySize { return fmt.Errorf("ed25519 public key size mismatch, expected %v but got %v", ed25519.PublicKeySize, len(pk)) } - msg := prepareSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, reportsPlusPrecursorDigest) - ok := ed25519.Verify(pk, prepareSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, reportsPlusPrecursorDigest), sig) + msg := prepareSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, rootDigest, reportsPlusPrecursorDigest) + ok := ed25519.Verify(pk, prepareSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, rootDigest, reportsPlusPrecursorDigest), sig) if !ok { // Other less common causes include leader equivocation or actually invalid signatures. return fmt.Errorf("PrepareSignature failed to verify. 
This is commonly caused by non-determinism in the ReportingPlugin msg: %x, sig: %x", msg, sig) @@ -213,6 +221,7 @@ func prepareSignatureMsg( seqNr uint64, inputsDigest StateTransitionInputsDigest, outputDigest StateTransitionOutputDigest, + rootDigest StateRootDigest, reportsPlusPrecursorDigest ReportsPlusPrecursorDigest, ) []byte { h := sha256.New() @@ -228,6 +237,8 @@ func prepareSignatureMsg( _, _ = h.Write(outputDigest[:]) + _, _ = h.Write(rootDigest[:]) + _, _ = h.Write(reportsPlusPrecursorDigest[:]) return ocr3_1DomainSeparatedSum(h) @@ -238,7 +249,7 @@ type AttributedPrepareSignature struct { Signer commontypes.OracleID } -const commitSignatureDomainSeparator = "ocr3.1 CommitSignature" +const commitSignatureDomainSeparator = "ocr3.1/CommitSignature/" type CommitSignature []byte @@ -247,10 +258,11 @@ func MakeCommitSignature( seqNr uint64, inputsDigest StateTransitionInputsDigest, outputDigest StateTransitionOutputDigest, + rootDigest StateRootDigest, reportsPlusPrecursorDigest ReportsPlusPrecursorDigest, signer func(msg []byte) ([]byte, error), ) (CommitSignature, error) { - return signer(commitSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, reportsPlusPrecursorDigest)) + return signer(commitSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, rootDigest, reportsPlusPrecursorDigest)) } func (sig CommitSignature) Verify( @@ -258,6 +270,7 @@ func (sig CommitSignature) Verify( seqNr uint64, inputsDigest StateTransitionInputsDigest, outputDigest StateTransitionOutputDigest, + rootDigest StateRootDigest, reportsPlusPrecursorDigest ReportsPlusPrecursorDigest, publicKey types.OffchainPublicKey, ) error { @@ -267,7 +280,7 @@ func (sig CommitSignature) Verify( return fmt.Errorf("ed25519 public key size mismatch, expected %v but got %v", ed25519.PublicKeySize, len(pk)) } - ok := ed25519.Verify(pk, commitSignatureMsg(ogid, seqNr, inputsDigest, outputDigest, reportsPlusPrecursorDigest), sig) + ok := ed25519.Verify(pk, commitSignatureMsg(ogid, seqNr, inputsDigest, 
outputDigest, rootDigest, reportsPlusPrecursorDigest), sig) if !ok { return fmt.Errorf("CommitSignature failed to verify") } @@ -280,6 +293,7 @@ func commitSignatureMsg( seqNr uint64, inputsDigest StateTransitionInputsDigest, outputDigest StateTransitionOutputDigest, + rootDigest StateRootDigest, reportsPlusPrecursorDigest ReportsPlusPrecursorDigest, ) []byte { h := sha256.New() @@ -295,6 +309,8 @@ func commitSignatureMsg( _, _ = h.Write(outputDigest[:]) + _, _ = h.Write(rootDigest[:]) + _, _ = h.Write(reportsPlusPrecursorDigest[:]) return ocr3_1DomainSeparatedSum(h) @@ -317,7 +333,7 @@ func (t HighestCertifiedTimestamp) Less(t2 HighestCertifiedTimestamp) bool { t.SeqNr == t2.SeqNr && t.CommittedElsePrepared == t2.CommittedElsePrepared && t.Epoch < t2.Epoch } -const signedHighestCertifiedTimestampDomainSeparator = "ocr3.1 SignedHighestCertifiedTimestamp" +const signedHighestCertifiedTimestampDomainSeparator = "ocr3.1/SignedHighestCertifiedTimestamp/" type SignedHighestCertifiedTimestamp struct { HighestCertifiedTimestamp HighestCertifiedTimestamp @@ -431,6 +447,8 @@ func (qc *EpochStartProof) Verify( return nil } +//go-sumtype:decl CertifiedPrepareOrCommit + type CertifiedPrepareOrCommit interface { isCertifiedPrepareOrCommit() Epoch() uint64 @@ -452,6 +470,7 @@ type CertifiedPrepare struct { PrepareSeqNr uint64 StateTransitionInputsDigest StateTransitionInputsDigest StateTransitionOutputs StateTransitionOutputs + StateRootDigest StateRootDigest ReportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor PrepareQuorumCertificate []AttributedPrepareSignature } @@ -512,7 +531,7 @@ func (hc *CertifiedPrepare) Verify( hc.ReportsPlusPrecursor, ) if err := aps.Signature.Verify( - ogid, hc.SeqNr(), hc.StateTransitionInputsDigest, outputDigest, + ogid, hc.SeqNr(), hc.StateTransitionInputsDigest, outputDigest, hc.StateRootDigest, reportsPlusPrecursorDigest, oracleIdentities[aps.Signer].OffchainPublicKey); err != nil { return fmt.Errorf("%v-th signature by %v-th oracle with pubkey 
%x does not verify: %w", i, aps.Signer, oracleIdentities[aps.Signer].OffchainPublicKey, err) } @@ -520,7 +539,6 @@ func (hc *CertifiedPrepare) Verify( return nil } func (hc *CertifiedPrepare) CheckSize(n int, f int, limits ocr3_1types.ReportingPluginLimits, maxReportSigLen int) bool { - if len(hc.PrepareQuorumCertificate) != byzquorum.Size(n, f) { return false } @@ -529,6 +547,12 @@ func (hc *CertifiedPrepare) CheckSize(n int, f int, limits ocr3_1types.Reporting return false } } + if !checkWriteSetSize(hc.StateTransitionOutputs.WriteSet, limits) { + return false + } + if len(hc.ReportsPlusPrecursor) > limits.MaxReportsPlusPrecursorLength { + return false + } return true } @@ -540,6 +564,7 @@ type CertifiedCommit struct { CommitSeqNr uint64 StateTransitionInputsDigest StateTransitionInputsDigest StateTransitionOutputs StateTransitionOutputs + StateRootDigest StateRootDigest ReportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor CommitQuorumCertificate []AttributedCommitSignature } @@ -568,6 +593,7 @@ func (hc *CertifiedCommit) IsGenesis() bool { return hc.Epoch() == uint64(0) && hc.SeqNr() == uint64(0) && hc.StateTransitionInputsDigest == StateTransitionInputsDigest{} && + hc.StateRootDigest == StateRootDigest{} && len(hc.ReportsPlusPrecursor) == 0 && len(hc.CommitQuorumCertificate) == 0 } @@ -613,6 +639,7 @@ func (hc *CertifiedCommit) Verify( hc.SeqNr(), hc.StateTransitionInputsDigest, outputDigest, + hc.StateRootDigest, reportsPlusPrecursorDigest, oracleIdentities[acs.Signer].OffchainPublicKey); err != nil { return fmt.Errorf("%v-th signature by %v-th oracle does not verify: %w", i, acs.Signer, err) @@ -629,17 +656,78 @@ func (hc *CertifiedCommit) CheckSize(n int, f int, limits ocr3_1types.ReportingP if len(hc.CommitQuorumCertificate) != byzquorum.Size(n, f) { return false } - for _, aps := range hc.CommitQuorumCertificate { - if len(aps.Signature) != ed25519.SignatureSize { + for _, acs := range hc.CommitQuorumCertificate { + if len(acs.Signature) != 
ed25519.SignatureSize { + return false + } + } + if !checkWriteSetSize(hc.StateTransitionOutputs.WriteSet, limits) { + return false + } + if len(hc.ReportsPlusPrecursor) > limits.MaxReportsPlusPrecursorLength { + return false + } + return true +} + +type StateTransitionBlock struct { + Epoch uint64 + BlockSeqNr uint64 + StateTransitionInputsDigest StateTransitionInputsDigest + StateTransitionOutputs StateTransitionOutputs + StateRootDigest StateRootDigest + ReportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor +} + +func (stb *StateTransitionBlock) SeqNr() uint64 { + return stb.BlockSeqNr +} + +func checkWriteSetSize(writeSet []KeyValuePairWithDeletions, limits ocr3_1types.ReportingPluginLimits) bool { + modifiedKeysPlusValuesLength := 0 + for _, kvPair := range writeSet { + if len(kvPair.Key) > ocr3_1types.MaxMaxKeyValueKeyLength { return false } + if kvPair.Deleted && len(kvPair.Value) > 0 { + return false + } + if len(kvPair.Value) > ocr3_1types.MaxMaxKeyValueValueLength { + return false + } + modifiedKeysPlusValuesLength += len(kvPair.Key) + len(kvPair.Value) + } + if modifiedKeysPlusValuesLength > limits.MaxKeyValueModifiedKeysPlusValuesLength { + return false + } + return true +} + +func (stb *StateTransitionBlock) CheckSize(limits ocr3_1types.ReportingPluginLimits) bool { + if !checkWriteSetSize(stb.StateTransitionOutputs.WriteSet, limits) { + return false + } + if len(stb.ReportsPlusPrecursor) > limits.MaxReportsPlusPrecursorLength { + return false } return true } type AttestedStateTransitionBlock struct { - StateTransitionBlock StateTransitionBlock - AttributedSignatures []AttributedCommitSignature + StateTransitionBlock StateTransitionBlock + AttributedCommitSignatures []AttributedCommitSignature +} + +func (astb *AttestedStateTransitionBlock) CheckSize(n int, f int, limits ocr3_1types.ReportingPluginLimits) bool { + if len(astb.AttributedCommitSignatures) != byzquorum.Size(n, f) { + return false + } + for _, acs := range astb.AttributedCommitSignatures { 
+ if len(acs.Signature) != ed25519.SignatureSize { + return false + } + } + return astb.StateTransitionBlock.CheckSize(limits) } func (astb *AttestedStateTransitionBlock) Verify( @@ -647,8 +735,8 @@ func (astb *AttestedStateTransitionBlock) Verify( oracleIdentities []config.OracleIdentity, byzQuorumSize int, ) error { - if byzQuorumSize != len(astb.AttributedSignatures) { - return fmt.Errorf("wrong number of signatures, expected %d for byz. quorum but got %d", byzQuorumSize, len(astb.AttributedSignatures)) + if byzQuorumSize != len(astb.AttributedCommitSignatures) { + return fmt.Errorf("wrong number of signatures, expected %d for byz. quorum but got %d", byzQuorumSize, len(astb.AttributedCommitSignatures)) } ogid := OutcomeGenerationID{ @@ -658,7 +746,7 @@ func (astb *AttestedStateTransitionBlock) Verify( seqNr := astb.StateTransitionBlock.SeqNr() seen := make(map[commontypes.OracleID]bool) - for i, sig := range astb.AttributedSignatures { + for i, sig := range astb.AttributedCommitSignatures { if seen[sig.Signer] { return fmt.Errorf("duplicate signature by %v", sig.Signer) } @@ -675,6 +763,7 @@ func (astb *AttestedStateTransitionBlock) Verify( seqNr, astb.StateTransitionBlock.StateTransitionOutputs.WriteSet, ), + astb.StateTransitionBlock.StateRootDigest, MakeReportsPlusPrecursorDigest( ogid, seqNr, @@ -694,6 +783,7 @@ type CertifiedCommittedReports[RI any] struct { SeqNr uint64 StateTransitionInputsDigest StateTransitionInputsDigest StateTransitionOutputDigest StateTransitionOutputDigest + StateRootDigest StateRootDigest ReportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor CommitQuorumCertificate []AttributedCommitSignature } @@ -755,6 +845,7 @@ func (ccrs *CertifiedCommittedReports[RI]) Verify( ccrs.SeqNr, ccrs.StateTransitionInputsDigest, ccrs.StateTransitionOutputDigest, + ccrs.StateRootDigest, reportsPlusPrecursorDigest, oracleIdentities[acs.Signer].OffchainPublicKey); err != nil { return fmt.Errorf("%v-th signature by %v-th oracle does not verify: %w", i, 
acs.Signer, err) diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_block_synchronization.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_block_synchronization.go deleted file mode 100644 index d296ffc7..00000000 --- a/offchainreporting2plus/internal/ocr3_1/protocol/state_block_synchronization.go +++ /dev/null @@ -1,367 +0,0 @@ -package protocol - -import ( - "crypto/rand" - "math" - "math/big" - "time" - - "github.com/smartcontractkit/libocr/commontypes" - "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/common/scheduler" -) - -const ( - MaxBlocksSent int = 10 - - // Maximum delay between a BLOCK-SYNC-REQ and a BLOCK-SYNC response. We'll try - // with another oracle if we don't get a response in this time. - - DeltaMaxBlockSyncRequest time.Duration = 1 * time.Second - - // Minimum delay between two consecutive BLOCK-SYNC-REQ requests - DeltaMinBlockSyncRequest = 10 * time.Millisecond - - // An oracle sends a BLOCK-SYNC-SUMMARY message every DeltaBlockSyncHeartbeat - DeltaBlockSyncHeartbeat time.Duration = time.Duration(math.MaxInt64) - - MaxBlockSyncSize int = 50000000 -) - -type blockSyncState[RI any] struct { - logger commontypes.Logger - oracles []*blockSyncTargetOracle[RI] - scheduler *scheduler.Scheduler[EventToStatePersistence[RI]] -} - -func (state *statePersistenceState[RI]) numInflightRequests() int { - count := 0 - for _, oracle := range state.blockSyncState.oracles { - if oracle.inFlightRequest != nil { - count++ - } - } - return count -} - -type blockSyncTargetOracle[RI any] struct { - // lowestPersistedSeqNr is the lowest sequence number the oracle still has an attested - // state transition block for - - lowestPersistedSeqNr uint64 - lastSummaryReceivedAt time.Time - // whether it is viable to send the next block sync request to this oracle - candidate bool - // the current inflight request to this oracle, nil otherwise - inFlightRequest *inFlightRequest[RI] -} - -type inFlightRequest[RI any] struct { - 
message MessageBlockSyncRequest[RI] - requestedFrom commontypes.OracleID -} - -func (state *statePersistenceState[RI]) highestHeardIncreased() { - state.trySendNextRequest() -} - -func (state *statePersistenceState[RI]) clearStaleBlockSyncRequests() { - state.refreshHighestPersistedStateTransitionBlockSeqNr() - nowPersistedSeqNr := state.highestPersistedStateTransitionBlockSeqNr - for _, oracle := range state.blockSyncState.oracles { - req := oracle.inFlightRequest - if req == nil { - continue - } - thenPersistedSeqNr := req.message.HighestCommittedSeqNr - if thenPersistedSeqNr < nowPersistedSeqNr { - // this is a stale request - state.blockSyncState.logger.Debug("removing stale BlockSyncRequest", commontypes.LogFields{ - "requestedFrom": req.requestedFrom, - "thenPersistedSeqNr": thenPersistedSeqNr, - "nowPersistedSeqNr": nowPersistedSeqNr, - }) - oracle.inFlightRequest = nil - } - } -} - -func (state *statePersistenceState[RI]) trySendNextRequest() { - if !state.readyToSendBlockSyncReq { - state.blockSyncState.logger.Trace("trySendNextRequest: not marked as ready to send BlockSyncRequest, dropping", nil) - return - } - if state.numInflightRequests() != 0 { - // if numInflightRequests > 0, we are already waiting for a response which - // we'll either receive or timeout, but regardless it will carry us over - // until state.highestHeard is retrieved - state.blockSyncState.logger.Debug("we are already fetching blocks", commontypes.LogFields{ - "numInflightRequests": state.numInflightRequests(), - }) - return - } - - state.refreshHighestPersistedStateTransitionBlockSeqNr() - reqSeqNr := state.highestPersistedStateTransitionBlockSeqNr - if state.highestHeardSeqNr > reqSeqNr { - state.blockSyncState.logger.Trace("trySendNextRequest: highestHeardSeqNr > highestPersistedStateTransitionBlockSeqNr, sending BlockSyncRequest", commontypes.LogFields{ - "highestHeardSeqNr": state.highestHeardSeqNr, - "highestPersistedSeqNr": state.highestPersistedStateTransitionBlockSeqNr, - 
}) - state.sendBlockSyncReq(reqSeqNr) - } -} - -func (state *statePersistenceState[RI]) tryComplete() { - state.clearStaleBlockSyncRequests() - state.trySendNextRequest() -} - -func (state *statePersistenceState[RI]) processBlockSyncSummaryHeartbeat() { - defer state.blockSyncState.scheduler.ScheduleDelay(EventBlockSyncSummaryHeartbeat[RI]{}, DeltaBlockSyncHeartbeat) - lowestPersistedSeqNr := 0 - state.refreshHighestPersistedStateTransitionBlockSeqNr() - if state.highestPersistedStateTransitionBlockSeqNr >= 1 { - lowestPersistedSeqNr = 1 - } - state.netSender.Broadcast(MessageBlockSyncSummary[RI]{ - uint64(lowestPersistedSeqNr), - }) -} - -func (state *statePersistenceState[RI]) messageBlockSyncSummary(msg MessageBlockSyncSummary[RI], sender commontypes.OracleID) { - state.blockSyncState.logger.Debug("received messageBlockSyncSummary", commontypes.LogFields{ - "sender": sender, - "msgLowestPersistedSeqNr": msg.LowestPersistedSeqNr, - }) - oracle := state.blockSyncState.oracles[sender] - oracle.lowestPersistedSeqNr = msg.LowestPersistedSeqNr - oracle.lastSummaryReceivedAt = time.Now() -} - -func (state *statePersistenceState[RI]) processExpiredBlockSyncRequest(requestedFrom commontypes.OracleID, nonce uint64) { - oracle := state.blockSyncState.oracles[requestedFrom] - if oracle.inFlightRequest == nil { - return - } - if oracle.inFlightRequest.message.Nonce == nonce { - oracle.inFlightRequest = nil - oracle.candidate = false - } - state.tryComplete() -} - -func (state *statePersistenceState[RI]) sendBlockSyncReq(seqNr uint64) { - candidates := make([]commontypes.OracleID, 0, state.config.N()) - for oracleID, oracle := range state.blockSyncState.oracles { - if commontypes.OracleID(oracleID) == state.id { - continue - } - if oracle.candidate { - - candidates = append(candidates, commontypes.OracleID(oracleID)) - } - } - - if len(candidates) == 0 { - - state.blockSyncState.logger.Debug("not candidate oracles for MessageBlockSyncRequest, restarting from scratch", nil) - 
candidates = make([]commontypes.OracleID, 0, state.config.N()) - for oracleID, oracle := range state.blockSyncState.oracles { - oracle.candidate = true - candidates = append(candidates, commontypes.OracleID(oracleID)) - } - } - randomIndex, err := rand.Int(rand.Reader, big.NewInt(int64(len(candidates)))) - if err != nil { - state.blockSyncState.logger.Critical("unexpected error returned by rand.Int", commontypes.LogFields{ - "error": err, - }) - return - } - target := candidates[int(randomIndex.Int64())] - nonce, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 64)) - if err != nil { - state.blockSyncState.logger.Critical("unexpected error returned by rand.Int", commontypes.LogFields{ - "error": err, - }) - return - } - state.blockSyncState.logger.Debug("sending MessageBlockSyncRequest", commontypes.LogFields{ - "highestCommittedSeqNr": seqNr, - "target": target, - }) - msg := MessageBlockSyncRequest[RI]{ - nil, // TODO: consider using a sentinel value here, e.g. "EmptyRequestHandleForInboundResponse" - seqNr, - nonce.Uint64(), - } - state.netSender.SendTo(msg, target) - if !(0 <= int(target) && int(target) < len(state.blockSyncState.oracles)) { - state.blockSyncState.logger.Critical("target oracle out of bounds", commontypes.LogFields{ - "target": target, - "N": state.config.N(), - }) - return - } - state.blockSyncState.oracles[target].inFlightRequest = &inFlightRequest[RI]{msg, target} - state.blockSyncState.scheduler.ScheduleDelay(EventExpiredBlockSyncRequest[RI]{target, nonce.Uint64()}, DeltaMaxBlockSyncRequest) - state.readyToSendBlockSyncReq = false - state.blockSyncState.scheduler.ScheduleDelay(EventReadyToSendNextBlockSyncRequest[RI]{}, DeltaMinBlockSyncRequest) -} - -func (state *statePersistenceState[RI]) messageBlockSyncReq(msg MessageBlockSyncRequest[RI], sender commontypes.OracleID) { - state.blockSyncState.logger.Debug("received MessageBlockSyncRequest", commontypes.LogFields{ - "sender": sender, - "msgHighestCommittedSeqNr": 
msg.HighestCommittedSeqNr, - }) - loSeqNr := msg.HighestCommittedSeqNr + 1 - - state.refreshHighestPersistedStateTransitionBlockSeqNr() - - var ( - astbs []AttestedStateTransitionBlock - hiSeqNr uint64 - ) - for seqNr := loSeqNr; len(astbs) < MaxBlocksSent && seqNr <= state.highestPersistedStateTransitionBlockSeqNr; seqNr++ { - - astb, err := state.database.ReadAttestedStateTransitionBlock(state.ctx, state.config.ConfigDigest, seqNr) - if err != nil { - state.blockSyncState.logger.Error("Database.ReadAttestedStateTransitionBlock failed while producing MessageBlockSync", commontypes.LogFields{ - "seqNr": seqNr, - "error": err, - }) - break // Stopping to not produce a gap. - } - - if astb.StateTransitionBlock.SeqNr() != seqNr { - break // Stopping to not produce a gap. - } - astbs = append(astbs, astb) - hiSeqNr = seqNr - } - - if len(astbs) > 0 { - state.blockSyncState.logger.Debug("sending MessageBlockSync", commontypes.LogFields{ - "highestPersisted": state.highestPersistedStateTransitionBlockSeqNr, - "loSeqNr": loSeqNr, - "hiSeqNr": hiSeqNr, - "to": sender, - }) - state.netSender.SendTo(MessageBlockSync[RI]{ - msg.RequestHandle, - astbs, - msg.Nonce, - }, sender) - } else { - state.blockSyncState.logger.Debug("no blocks to send, not responding to MessageBlockSyncRequest", commontypes.LogFields{ - "highestPersisted": state.highestPersistedStateTransitionBlockSeqNr, - "loSeqNr": loSeqNr, - "to": sender, - }) - } -} - -func (state *statePersistenceState[RI]) messageBlockSync(msg MessageBlockSync[RI], sender commontypes.OracleID) { - state.blockSyncState.logger.Debug("received MessageBlockSync", commontypes.LogFields{ - "sender": sender, - }) - req := state.blockSyncState.oracles[sender].inFlightRequest - if req == nil { - state.blockSyncState.logger.Warn("dropping unexpected MessageBlockSync", commontypes.LogFields{ - "nonce": msg.Nonce, - "sender": sender, - }) - return - } - - if msg.Nonce != req.message.Nonce { - state.blockSyncState.logger.Warn("dropping 
MessageBlockSync with unexpected nonce", commontypes.LogFields{ - "expectedNonce": req.message.Nonce, - "actualNonce": msg.Nonce, - "sender": sender, - }) - return - } - - // so that any future response with the same nonce will become invalid - state.blockSyncState.oracles[sender].inFlightRequest = nil - - // at this point we know we've received a response from the correct oracle - - // 1. if any of the following logic errors out, we will immediately notice - // and start re-requesting from where we left off, even if we partially - // persist the blocks in this response - // 2. if the logic succeeds, we'll move to requesting for the next sequence - // number, until we reach highestHeardSeqNr - defer state.tryComplete() - if len(msg.AttestedStateTransitionBlocks) > MaxBlocksSent { - state.blockSyncState.logger.Warn("dropping MessageBlockSync with more blocks than the maximum allowed number", commontypes.LogFields{ - "blockNum": len(msg.AttestedStateTransitionBlocks), - "expectedBlockNum": MaxBlocksSent, - "sender": sender, - }) - return - } - for i, astb := range msg.AttestedStateTransitionBlocks { - if astb.StateTransitionBlock.SeqNr() != req.message.HighestCommittedSeqNr+uint64(i)+1 { - state.blockSyncState.logger.Warn("dropping MessageBlockSync with out of order state transition blocks", commontypes.LogFields{ - "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), - "sender": sender, - }) - return - } - } - for _, astb := range msg.AttestedStateTransitionBlocks { - if err := astb.Verify(state.config.ConfigDigest, state.config.OracleIdentities, state.config.ByzQuorumSize()); err != nil { - state.blockSyncState.logger.Warn("dropping MessageBlockSync with invalid attestation", commontypes.LogFields{ - "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), - "sender": sender, - "error": err, - }) - return - } - } - - for _, astb := range msg.AttestedStateTransitionBlocks { - state.refreshHighestPersistedStateTransitionBlockSeqNr() - expectedSeqNr 
:= state.highestPersistedStateTransitionBlockSeqNr + 1 - seqNr := astb.StateTransitionBlock.SeqNr() - - state.blockSyncState.logger.Debug("retrieved state transition block", commontypes.LogFields{ - "stateTransitionBlockSeqNr": seqNr, - }) - - if seqNr > expectedSeqNr { - - state.blockSyncState.logger.Warn("dropping MessageBlockSync which creates gaps in persisted blocks", commontypes.LogFields{ - "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), - "highestPersistedStateTransitionBlockSeqNr": state.highestPersistedStateTransitionBlockSeqNr, - "sender": sender, - }) - return - } else if seqNr < expectedSeqNr { - state.blockSyncState.logger.Debug("no need to persist this block, we have done so already", commontypes.LogFields{ - "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), - "highestPersistedStateTransitionBlock": state.highestPersistedStateTransitionBlockSeqNr, - }) - } else { - werr := state.persist(astb) - if werr != nil { - - { - rastb, rerr := state.database.ReadAttestedStateTransitionBlock(state.ctx, state.config.ConfigDigest, seqNr) - if rerr == nil && rastb.StateTransitionBlock.SeqNr() == seqNr { - - continue - } - } - - state.blockSyncState.logger.Error("error persisting state transition block", commontypes.LogFields{ - "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), - "error": werr, - }) - return - } - } - } -} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_persistence.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_persistence.go deleted file mode 100644 index 72f23228..00000000 --- a/offchainreporting2plus/internal/ocr3_1/protocol/state_persistence.go +++ /dev/null @@ -1,378 +0,0 @@ -package protocol - -import ( - "context" - "fmt" - "math" - "time" - - "github.com/smartcontractkit/libocr/commontypes" - "github.com/smartcontractkit/libocr/internal/loghelper" - "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/common/scheduler" - 
"github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config" - "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" -) - -func RunStatePersistence[RI any]( - ctx context.Context, - - chNetToStatePersistence <-chan MessageToStatePersistenceWithSender[RI], - chOutcomeGenerationToStatePersistence <-chan EventToStatePersistence[RI], - chReportAttestationToStatePersistence <-chan EventToStatePersistence[RI], - config ocr3config.SharedConfig, - database Database, - id commontypes.OracleID, - kvStore KeyValueStore, - logger loghelper.LoggerWithContext, - netSender NetworkSender[RI], - reportingPlugin ocr3_1types.ReportingPlugin[RI], - restoredState StatePersistenceState, - restoredHighestCommittedToKVSeqNr uint64, -) { - sched := scheduler.NewScheduler[EventToStatePersistence[RI]]() - defer sched.Close() - - newStatePersistenceState(ctx, chNetToStatePersistence, - chOutcomeGenerationToStatePersistence, - chReportAttestationToStatePersistence, - config, database, id, kvStore, logger, netSender, reportingPlugin, sched).run(restoredState) -} - -const maxPersistedAttestedStateTransitionBlocks int = math.MaxInt - -type statePersistenceState[RI any] struct { - ctx context.Context - - chNetToStatePersistence <-chan MessageToStatePersistenceWithSender[RI] - chOutcomeGenerationToStatePersistence <-chan EventToStatePersistence[RI] - chReportAttestationToStatePersistence <-chan EventToStatePersistence[RI] - tTryReplay <-chan time.Time - config ocr3config.SharedConfig - database Database - id commontypes.OracleID - kvStore KeyValueStore - logger loghelper.LoggerWithContext - netSender NetworkSender[RI] - reportingPlugin ocr3_1types.ReportingPlugin[RI] - - highestPersistedStateTransitionBlockSeqNr uint64 - highestHeardSeqNr uint64 - readyToSendBlockSyncReq bool - - blockSyncState blockSyncState[RI] - treeSyncState treeSyncState -} - -func (state *statePersistenceState[RI]) run(restoredState StatePersistenceState) { - 
state.highestPersistedStateTransitionBlockSeqNr = restoredState.HighestPersistedStateTransitionBlockSeqNr - state.logger.Info("StatePersistence: running", commontypes.LogFields{ - "restoredHighestPersistedStateTransitionBlockSeqNr": restoredState.HighestPersistedStateTransitionBlockSeqNr, - }) - - for { - select { - case msg := <-state.chNetToStatePersistence: - msg.msg.processStatePersistence(state, msg.sender) - case ev := <-state.chOutcomeGenerationToStatePersistence: - ev.processStatePersistence(state) - case ev := <-state.chReportAttestationToStatePersistence: - ev.processStatePersistence(state) - case ev := <-state.blockSyncState.scheduler.Scheduled(): - ev.processStatePersistence(state) - case <-state.tTryReplay: - state.eventTTryReplay() - case <-state.ctx.Done(): - } - - // ensure prompt exit - select { - case <-state.ctx.Done(): - state.logger.Info("StatePersistence: exiting", nil) - // state.scheduler.Close() - return - default: - } - } -} - -func (state *statePersistenceState[RI]) eventTTryReplay() { - state.logger.Trace("TTryReplay fired", nil) - - progressed := state.tryReplay() - - _, haveNext, retry := state.nextBlockToReplay() - if progressed || haveNext || retry { - - state.tTryReplay = time.After(0) - } -} - -func (state *statePersistenceState[RI]) tryReplay() bool { - block, ok, _ := state.nextBlockToReplay() - if !ok { - return false - } - return state.replayVerifiedBlock(block) -} - -func (state *statePersistenceState[RI]) replayVerifiedBlock(stb StateTransitionBlock) (success bool) { - writeSet := stb.StateTransitionOutputs.WriteSet - - seqNr := stb.SeqNr() - logger := state.logger.MakeChild(commontypes.LogFields{ - "replay": "YES", - "seqNr": seqNr, - }) - - logger.Trace("replaying state transition block", nil) - kvReadWriteTxn, err := state.kvStore.NewReadWriteTransaction(seqNr) - if err != nil { - logger.Error("could not open new kv transaction", commontypes.LogFields{ - "err": err, - }) - return - } - defer kvReadWriteTxn.Discard() - - 
for _, m := range writeSet { - - var err error - if m.Deleted { - err = kvReadWriteTxn.Delete(m.Key) - } else { - err = kvReadWriteTxn.Write(m.Key, m.Value) - } - if err != nil { - logger.Error("failed to write write-set modification", commontypes.LogFields{ - "error": err, - "seqNr": seqNr, - }) - return - } - } - - werr := kvReadWriteTxn.Commit() - kvReadWriteTxn.Discard() - - if werr != nil { - kvSeqNr, rerr := state.highestCommittedToKVSeqNr() - if rerr != nil { - logger.Error("failed to commit kv transaction, and then failed to read highest committed to kv seq nr", commontypes.LogFields{ - "werror": werr, - "rerror": rerr, - "seqNr": seqNr, - }) - return - } - if kvSeqNr < seqNr { - logger.Error("failed to commit kv transaction, but not due to conflict without outcome generation", commontypes.LogFields{ - "seqNr": seqNr, - "kvSeqNr": kvSeqNr, - "error": werr, - }) - - return - } else { - - return - } - } - success = true - return -} - -func (state *statePersistenceState[RI]) eventStateSyncRequest(ev EventStateSyncRequest[RI]) { - state.logger.Debug("received EventStateSyncRequest", commontypes.LogFields{ - "heardSeqNr": ev.SeqNr, - }) - state.tTryReplay = time.After(0) - state.heardSeqNr(ev.SeqNr) -} - -func (state *statePersistenceState[RI]) heardSeqNr(seqNr uint64) { - if seqNr > state.highestHeardSeqNr { - state.logger.Debug("highest heard sequence number increased", commontypes.LogFields{ - "old": state.highestHeardSeqNr, - "new": seqNr, - }) - state.highestHeardSeqNr = seqNr - state.highestHeardIncreased() - } -} - -func (state *statePersistenceState[RI]) refreshHighestPersistedStateTransitionBlockSeqNr() { - highestCommittedToKVSeqNr, err := state.highestCommittedToKVSeqNr() - if err != nil { - state.logger.Error("failed to get highest committed to kv seq nr during refresh", commontypes.LogFields{ - "error": err, - }) - return - } - if highestCommittedToKVSeqNr > state.highestPersistedStateTransitionBlockSeqNr { - - 
state.highestPersistedStateTransitionBlockSeqNr = highestCommittedToKVSeqNr - } -} - -func (state *statePersistenceState[RI]) persist(verifiedAstb AttestedStateTransitionBlock) error { - state.refreshHighestPersistedStateTransitionBlockSeqNr() - expectedSeqNr := state.highestPersistedStateTransitionBlockSeqNr + 1 - seqNr := verifiedAstb.StateTransitionBlock.SeqNr() - - if seqNr != expectedSeqNr { - - return fmt.Errorf("cannot persist out of order state transition block: expected %d, got %d", - expectedSeqNr, - seqNr, - ) - } - - err := state.database.WriteAttestedStateTransitionBlock( - state.ctx, - state.config.ConfigDigest, - seqNr, - verifiedAstb, - ) - if err != nil { - return fmt.Errorf("failed to write attested state transition block %d: %w", seqNr, err) - } - - state.highestPersistedStateTransitionBlockSeqNr = seqNr - state.logger.Trace("persisted block", commontypes.LogFields{ - "seqNr": seqNr, - }) - state.tTryReplay = time.After(0) - - err = state.database.WriteStatePersistenceState( - state.ctx, state.config.ConfigDigest, - StatePersistenceState{ - seqNr, - }, - ) - if err != nil { - return fmt.Errorf("failed to write state persistence state %d: %w", seqNr, err) - } - return nil -} - -func (state *statePersistenceState[RI]) highestCommittedToKVSeqNr() (uint64, error) { - return state.kvStore.HighestCommittedSeqNr() -} - -func (state *statePersistenceState[RI]) nextBlockToReplay() (block StateTransitionBlock, found bool, retry bool) { - committedToKVSeqNr, err := state.highestCommittedToKVSeqNr() - if err != nil { - state.logger.Error("failed to get highest committed to kv seq nr", commontypes.LogFields{ - "error": err, - }) - retry = true - return - } - nextSeqNr := committedToKVSeqNr + 1 - - astb, err := state.database.ReadAttestedStateTransitionBlock(state.ctx, state.config.ConfigDigest, nextSeqNr) - if err != nil { - state.logger.Error("failed to read attested state transition block from database", commontypes.LogFields{ - "nextSeqNr": nextSeqNr, - 
"error": err, - }) - retry = true - return - } - seqNr := astb.StateTransitionBlock.SeqNr() - if seqNr == 0 { - // block not found - state.logger.Trace("wanted next block to replay not found", commontypes.LogFields{ - "nextSeqNr": nextSeqNr, - }) - - return - } else if seqNr == nextSeqNr { - state.logger.Debug("next state transition block to replay", commontypes.LogFields{ - "nextSeqNr": nextSeqNr, - }) - - block = astb.StateTransitionBlock - found = true - return - } else { - state.logger.Critical("assumption violation, block in database has inconsistent seq nr", commontypes.LogFields{ - "expectedSeqNr": nextSeqNr, - "actualSeqNr": seqNr, - "block": astb, - }) - panic("") - } -} - -func (state *statePersistenceState[RI]) eventEventBlockSyncSummaryHeartbeat(ev EventBlockSyncSummaryHeartbeat[RI]) { - state.processBlockSyncSummaryHeartbeat() -} - -func (state *statePersistenceState[RI]) eventExpiredBlockSyncRequest(ev EventExpiredBlockSyncRequest[RI]) { - state.blockSyncState.logger.Debug("received eventExpiredBlockSyncRequest", commontypes.LogFields{ - "requestedFrom": ev.RequestedFrom, - }) - state.processExpiredBlockSyncRequest(ev.RequestedFrom, ev.Nonce) -} - -func (state *statePersistenceState[RI]) eventReadyToSendNextBlockSyncRequest(ev EventReadyToSendNextBlockSyncRequest[RI]) { - state.logger.Debug("received eventReadyToSendNextBlockSyncRequest", commontypes.LogFields{}) - state.readyToSendBlockSyncReq = true - state.trySendNextRequest() -} - -func newStatePersistenceState[RI any]( - ctx context.Context, - chNetToStatePersistence <-chan MessageToStatePersistenceWithSender[RI], - chOutcomeGenerationToStatePersistence <-chan EventToStatePersistence[RI], - chReportAttestationToStatePersistence <-chan EventToStatePersistence[RI], - config ocr3config.SharedConfig, - database Database, - id commontypes.OracleID, - kvStore KeyValueStore, - logger loghelper.LoggerWithContext, - netSender NetworkSender[RI], - reportingPlugin ocr3_1types.ReportingPlugin[RI], - 
scheduler *scheduler.Scheduler[EventToStatePersistence[RI]], -) *statePersistenceState[RI] { - oracles := make([]*blockSyncTargetOracle[RI], 0) - for i := 0; i < config.N(); i++ { - oracles = append(oracles, &blockSyncTargetOracle[RI]{ - 0, - time.Time{}, - true, - nil, - }) - } - - scheduler.ScheduleDelay(EventBlockSyncSummaryHeartbeat[RI]{}, DeltaBlockSyncHeartbeat) - - tTryReplay := time.After(0) - - return &statePersistenceState[RI]{ - ctx, - - chNetToStatePersistence, - chOutcomeGenerationToStatePersistence, - chReportAttestationToStatePersistence, - tTryReplay, - config, - database, - id, - kvStore, - logger.MakeUpdated(commontypes.LogFields{"proto": "state"}), - netSender, - reportingPlugin, - 0, - 0, - true, - - blockSyncState[RI]{ - logger.MakeUpdated(commontypes.LogFields{"proto": "stateBlockSync"}), - oracles, - scheduler, - }, - treeSyncState{}, - } -} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync.go new file mode 100644 index 00000000..ad99e471 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync.go @@ -0,0 +1,521 @@ +package protocol + +import ( + "context" + "math" + "slices" + "time" + + "github.com/google/btree" + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" + "github.com/smartcontractkit/libocr/subprocesses" +) + +const ( + // An oracle sends a STATE-SYNC-SUMMARY message every DeltaStateSyncHeartbeat + DeltaStateSyncHeartbeat time.Duration = 1 * time.Second +) + +func RunStateSync[RI any]( + ctx context.Context, + + chNetToStateSync <-chan MessageToStateSyncWithSender[RI], + chOutcomeGenerationToStateSync <-chan 
EventToStateSync[RI], + chReportAttestationToStateSync <-chan EventToStateSync[RI], + config ocr3config.SharedConfig, + database Database, + id commontypes.OracleID, + kvDb KeyValueDatabase, + logger loghelper.LoggerWithContext, + netSender NetworkSender[RI], + reportingPlugin ocr3_1types.ReportingPlugin[RI], +) { + chNotificationToStateBlockReplay := make(chan struct{}) + chNotificationToStateDestroyIfNeeded := make(chan struct{}) + + subs := subprocesses.Subprocesses{} + defer subs.Wait() + subs.Go(func() { + RunStateSyncDestroyIfNeeded(ctx, logger, kvDb, chNotificationToStateDestroyIfNeeded) + }) + subs.Go(func() { + RunStateSyncReap(ctx, config, logger, database, kvDb) + }) + subs.Go(func() { + RunStateSyncBlockReplay(ctx, logger, kvDb, chNotificationToStateBlockReplay) + }) + + newStateSyncState(ctx, + chNetToStateSync, + chNotificationToStateBlockReplay, + chNotificationToStateDestroyIfNeeded, + chOutcomeGenerationToStateSync, + chReportAttestationToStateSync, + config, database, id, kvDb, logger, netSender).run() +} + +type syncMode int + +const ( + syncModeUnknown syncMode = iota + syncModeBlock + syncModeTree + syncModeFetchSnapshotBlock +) + +type stateSyncState[RI any] struct { + ctx context.Context + + chNetToStateSync <-chan MessageToStateSyncWithSender[RI] + chNotificationToStateBlockReplay chan<- struct{} + chNotificationToStateDestroyIfNeeded chan<- struct{} + chOutcomeGenerationToStateSync <-chan EventToStateSync[RI] + chReportAttestationToStateSync <-chan EventToStateSync[RI] + config ocr3config.SharedConfig + database Database + id commontypes.OracleID + kvDb KeyValueDatabase + logger loghelper.LoggerWithContext + netSender NetworkSender[RI] + + highestPersistedStateTransitionBlockSeqNr uint64 + lowestPersistedStateTransitionBlockSeqNr uint64 + highestCommittedSeqNr uint64 + + oracles []*syncOracle + + highestHeardSeqNr uint64 + + blockSyncState blockSyncState[RI] + treeSyncState treeSyncState[RI] + + syncMode syncMode + + tSendSummary <-chan 
time.Time +} + +type syncOracle struct { + // lowestPersistedSeqNr is the lowest sequence number the oracle still has an attested + // state transition block for + lowestPersistedSeqNr uint64 + // highestCommittedSeqNr is the highest sequence number the oracle has committed to + highestCommittedSeqNr uint64 + lastSummaryReceivedAt time.Time +} + +func (stasy *stateSyncState[RI]) run() { + stasy.refreshStateSyncState() + stasy.logger.Info("StateSync: running", commontypes.LogFields{ + "highestPersistedStateTransitionBlockSeqNr": stasy.highestPersistedStateTransitionBlockSeqNr, + }) + + for { + select { + case msg := <-stasy.chNetToStateSync: + msg.msg.processStateSync(stasy, msg.sender) + case ev := <-stasy.chOutcomeGenerationToStateSync: + ev.processStateSync(stasy) + case ev := <-stasy.chReportAttestationToStateSync: + ev.processStateSync(stasy) + case <-stasy.tSendSummary: + stasy.eventTSendSummaryTimeout() + case <-stasy.blockSyncState.blockRequesterGadget.Ticker(): + stasy.blockSyncState.blockRequesterGadget.Tick() + case <-stasy.treeSyncState.treeChunkRequesterGadget.Ticker(): + stasy.treeSyncState.treeChunkRequesterGadget.Tick() + case <-stasy.ctx.Done(): + } + + // ensure prompt exit + select { + case <-stasy.ctx.Done(): + stasy.logger.Info("StateSync: exiting", nil) + return + default: + } + } +} + +func (stasy *stateSyncState[RI]) pleaseTryToReplayBlock() { + select { + case stasy.chNotificationToStateBlockReplay <- struct{}{}: + default: + } +} + +func (stasy *stateSyncState[RI]) pleaseDestroyStateIfNeeded() { + select { + case stasy.chNotificationToStateDestroyIfNeeded <- struct{}{}: + default: + } +} + +func (stasy *stateSyncState[RI]) eventStateSyncRequest(ev EventStateSyncRequest[RI]) { + stasy.logger.Debug("received EventStateSyncRequest", commontypes.LogFields{ + "heardSeqNr": ev.SeqNr, + }) + + stasy.pleaseTryToReplayBlock() + + if ev.SeqNr <= stasy.highestHeardSeqNr { + return + } + + stasy.logger.Debug("highest heard sequence number increased 
from EventStateSyncRequest", commontypes.LogFields{ + "old": stasy.highestHeardSeqNr, + "new": ev.SeqNr, + }) + stasy.highestHeardSeqNr = ev.SeqNr + stasy.tryToKickStartSync() +} + +func (stasy *stateSyncState[RI]) refreshStateSyncState() (ok bool) { + kvReadTxn, err := stasy.kvDb.NewReadTransactionUnchecked() + if err != nil { + stasy.logger.Warn("failed to create new transaction", commontypes.LogFields{ + "error": err, + }) + return + } + defer kvReadTxn.Discard() + + highestCommittedToKVSeqNr, err := kvReadTxn.ReadHighestCommittedSeqNr() + if err != nil { + stasy.logger.Error("failed to get highest committed to kv seq nr during refresh", commontypes.LogFields{ + "error": err, + }) + return + } + + stasy.highestCommittedSeqNr = highestCommittedToKVSeqNr + + if highestCommittedToKVSeqNr > stasy.highestPersistedStateTransitionBlockSeqNr { + + stasy.highestPersistedStateTransitionBlockSeqNr = highestCommittedToKVSeqNr + stasy.reapBlockBuffer() + } + + lowestPersistedSeqNr, err := kvReadTxn.ReadLowestPersistedSeqNr() + if err != nil { + stasy.logger.Warn("failed to read lowest persisted seq nr", commontypes.LogFields{ + "error": err, + }) + return + } + stasy.lowestPersistedStateTransitionBlockSeqNr = lowestPersistedSeqNr + + treeSyncStatus, err := kvReadTxn.ReadTreeSyncStatus() + if err != nil { + stasy.logger.Warn("failed to read tree sync status", commontypes.LogFields{ + "error": err, + }) + return + } + stasy.treeSyncState.treeSyncPhase = treeSyncStatus.Phase + stasy.treeSyncState.targetSeqNr = treeSyncStatus.TargetSeqNr + stasy.treeSyncState.targetStateRootDigest = treeSyncStatus.TargetStateRootDigest + stasy.treeSyncState.pendingKeyDigestRanges = treeSyncStatus.PendingKeyDigestRanges + ok = true + return +} + +func (stasy *stateSyncState[RI]) eventTSendSummaryTimeout() { + defer func() { + stasy.tSendSummary = time.After(DeltaStateSyncHeartbeat) + }() + if !stasy.refreshStateSyncState() { + return + } + if stasy.treeSyncState.treeSyncPhase != 
TreeSyncPhaseInactive { + + stasy.netSender.Broadcast(MessageStateSyncSummary[RI]{ + math.MaxUint64, + 0, + }) + return + } + stasy.netSender.Broadcast(MessageStateSyncSummary[RI]{ + stasy.lowestPersistedStateTransitionBlockSeqNr, + stasy.highestCommittedSeqNr, + }) +} + +func (stasy *stateSyncState[RI]) messageStateSyncSummary(msg MessageStateSyncSummary[RI], sender commontypes.OracleID) { + stasy.logger.Debug("received messageStateSyncSummary", commontypes.LogFields{ + "sender": sender, + "msgLowestPersistedSeqNr": msg.LowestPersistedSeqNr, + "msgHighestCommittedSeqNr": msg.HighestCommittedSeqNr, + }) + stasy.oracles[sender] = &syncOracle{ + msg.LowestPersistedSeqNr, + msg.HighestCommittedSeqNr, + time.Now(), + } + stasy.updateHighestHeardFromSummaries() + + stasy.tryToKickStartSync() +} + +func (stasy *stateSyncState[RI]) summaryFreshnessCutoff() time.Duration { + return stasy.config.DeltaProgress / 4 +} + +type honestOraclePruneStatus int + +const ( + _ honestOraclePruneStatus = iota + honestOraclePruneStatusCannotDecideYet + honestOraclePruneStatusWouldNotPrune + honestOraclePruneStatusWouldPrune +) + +// for a given sequence number, is there guaranteed to be at least one honest oracle that could help us +// sync the committed state at `seqNr`? 
+// - honestOraclePruneStatusWouldNotPrune: yes +// - honestOraclePruneStatusWouldNotPrune: no +// - honestOraclePruneStatusCannotDecideYet: we don't have enough information to answer the question +func (stasy *stateSyncState[RI]) findSomeHonestOraclePruneStatus(seqNr uint64) honestOraclePruneStatus { + wouldNotPrune := 0 + wouldPrune := 0 + + for i, oracle := range stasy.oracles { + if time.Since(oracle.lastSummaryReceivedAt) > stasy.summaryFreshnessCutoff() { + + continue + } + + if commontypes.OracleID(i) == stasy.id { + + continue + } + + if oracle.lowestPersistedSeqNr <= seqNr { + wouldNotPrune++ + } else { + wouldPrune++ + } + } + + if wouldNotPrune > stasy.config.F { + return honestOraclePruneStatusWouldNotPrune + } else if wouldPrune > stasy.config.F { + return honestOraclePruneStatusWouldPrune + } else { + return honestOraclePruneStatusCannotDecideYet + } +} + +func (stasy *stateSyncState[RI]) updateHighestHeardFromSummaries() { + hiSeqNrs := make([]uint64, len(stasy.oracles)) + for i, oracle := range stasy.oracles { + + hiSeqNrs[i] = oracle.highestCommittedSeqNr + } + + slices.Sort(hiSeqNrs) + candidateHighestHeard := hiSeqNrs[len(hiSeqNrs)-1-stasy.config.F] + if candidateHighestHeard <= stasy.highestHeardSeqNr { + return + } + + stasy.logger.Debug("highest heard sequence number increased from MessageStateSyncSummary", commontypes.LogFields{ + "old": stasy.highestHeardSeqNr, + "new": candidateHighestHeard, + }) + stasy.highestHeardSeqNr = candidateHighestHeard +} + +type blockSyncOrTreeSyncDecision int + +const ( + _ blockSyncOrTreeSyncDecision = iota + blockSyncOrTreeSyncDecisionCannotDecideYet + blockSyncOrTreeSyncDecisionBlockSync + blockSyncOrTreeSyncDecisionTreeSync +) + +func (stasy *stateSyncState[RI]) decideBlockSyncOrTreeSyncBasedOnSummariesAndHighestHeard() blockSyncOrTreeSyncDecision { + + ourStartingPointSeqNr := max(stasy.highestCommittedSeqNr, stasy.highestPersistedStateTransitionBlockSeqNr) + 1 + switch 
stasy.findSomeHonestOraclePruneStatus(ourStartingPointSeqNr) { + case honestOraclePruneStatusWouldNotPrune: + return blockSyncOrTreeSyncDecisionBlockSync + case honestOraclePruneStatusWouldPrune: + return blockSyncOrTreeSyncDecisionTreeSync + case honestOraclePruneStatusCannotDecideYet: + } + return blockSyncOrTreeSyncDecisionCannotDecideYet +} + +func (stasy *stateSyncState[RI]) pickSomeTreeSyncTarget() (uint64, bool) { + if snapshotSeqNr(stasy.highestHeardSeqNr) == stasy.highestHeardSeqNr { + return stasy.highestHeardSeqNr, true + } else { + snapshotIndex := snapshotIndexFromSeqNr(stasy.highestHeardSeqNr) + if snapshotIndex > 0 { + return maxSeqNrWithSnapshotIndex(snapshotIndex - 1), true + } else { + return 0, false + } + } +} + +func (stasy *stateSyncState[RI]) needToRetargetTreeSync() bool { + switch stasy.findSomeHonestOraclePruneStatus(stasy.treeSyncState.targetSeqNr) { + case honestOraclePruneStatusWouldNotPrune: + return false + case honestOraclePruneStatusWouldPrune: + + return true + case honestOraclePruneStatusCannotDecideYet: + return false + } + return false +} + +func (stasy *stateSyncState[RI]) treeSyncCompleted() { + stasy.syncMode = syncModeUnknown + stasy.tryToKickStartSync() +} + +func (stasy *stateSyncState[RI]) treeSyncNeedsSnapshotBlock() { + stasy.syncMode = syncModeFetchSnapshotBlock + stasy.blockSyncState.blockRequesterGadget.PleaseRecheckPendingItems() +} + +func (stasy *stateSyncState[RI]) tryToKickStartSync() { + + switch stasy.syncMode { + case syncModeTree: + + stasy.evolveTreeSyncPhase() + return + case syncModeFetchSnapshotBlock: + + if stasy.needToRetargetTreeSync() { + stasy.logger.Warn("not guaranteed to be able to fetch the tree-sync target block, giving up", commontypes.LogFields{ + "targetSeqNr": stasy.treeSyncState.targetSeqNr, + }) + stasy.syncMode = syncModeUnknown + } else { + return + } + case syncModeBlock: + stasy.tryCompleteBlockSync() + stasy.blockSyncState.blockRequesterGadget.PleaseRecheckPendingItems() + + case 
syncModeUnknown: + + } + + if !stasy.refreshStateSyncState() { + stasy.logger.Warn("cannot kick start sync, failed to refresh stateSyncState", nil) + return + } + + decision := stasy.decideBlockSyncOrTreeSyncBasedOnSummariesAndHighestHeard() + + if decision == blockSyncOrTreeSyncDecisionCannotDecideYet { + stasy.logger.Debug("cannot decide whether to block-sync or tree-sync, yet", nil) + return + } + + if stasy.treeSyncState.treeSyncPhase != TreeSyncPhaseInactive || decision == blockSyncOrTreeSyncDecisionTreeSync { + stasy.logger.Debug("switching to tree-sync mode", nil) + stasy.syncMode = syncModeTree + stasy.evolveTreeSyncPhase() + return + } + + if decision == blockSyncOrTreeSyncDecisionBlockSync { + stasy.logger.Debug("switching to block-sync mode", nil) + stasy.syncMode = syncModeBlock + // requester gadget will take it from here + stasy.blockSyncState.blockRequesterGadget.PleaseRecheckPendingItems() + return + } +} + +func newStateSyncState[RI any]( + ctx context.Context, + chNetToStateSync <-chan MessageToStateSyncWithSender[RI], + chNotificationToStateBlockReplay chan<- struct{}, + chNotificationToStateDestroyIfNeeded chan<- struct{}, + chOutcomeGenerationToStateSync <-chan EventToStateSync[RI], + chReportAttestationToStateSync <-chan EventToStateSync[RI], + config ocr3config.SharedConfig, + database Database, + id commontypes.OracleID, + kvDb KeyValueDatabase, + logger loghelper.LoggerWithContext, + netSender NetworkSender[RI], +) *stateSyncState[RI] { + oracles := make([]*syncOracle, 0) + for i := 0; i < config.N(); i++ { + oracles = append(oracles, &syncOracle{ + 0, + 0, + time.Time{}, + }) + } + + stasy := &stateSyncState[RI]{ + ctx, + + chNetToStateSync, + chNotificationToStateBlockReplay, + chNotificationToStateDestroyIfNeeded, + chOutcomeGenerationToStateSync, + chReportAttestationToStateSync, + config, + database, + id, + kvDb, + logger.MakeUpdated(commontypes.LogFields{"proto": "stasy"}), + netSender, + 0, + 0, + 0, + + oracles, + 0, + + 
blockSyncState[RI]{ + logger.MakeUpdated(commontypes.LogFields{"proto": "stasy/block"}), + nil, // defined right below + btree.NewG(2, func(a AttestedStateTransitionBlock, b AttestedStateTransitionBlock) bool { + return a.StateTransitionBlock.SeqNr() < b.StateTransitionBlock.SeqNr() + }), + }, + treeSyncState[RI]{ + logger.MakeUpdated(commontypes.LogFields{"proto": "stasy/tree"}), + nil, // defined right below + TreeSyncPhaseInactive, + 0, + StateRootDigest{}, + PendingKeyDigestRanges{}, + }, + syncModeUnknown, + time.After(DeltaStateSyncHeartbeat), + } + + stasy.blockSyncState.blockRequesterGadget = requestergadget.NewRequesterGadget[seqNrRange]( + config.N(), + DeltaMinBlockSyncRequest, + stasy.sendBlockSyncRequest, + stasy.getPendingBlocksToRequest, + stasy.getBlockSyncSeeders, + ) + stasy.treeSyncState.treeChunkRequesterGadget = requestergadget.NewRequesterGadget[treeSyncChunkRequestItem]( + config.N(), + DeltaMinTreeSyncRequest, + stasy.sendTreeSyncChunkRequest, + stasy.getPendingTreeSyncChunksToRequest, + stasy.getTreeSyncChunkSeeders, + ) + return stasy +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block.go new file mode 100644 index 00000000..8dca72fa --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block.go @@ -0,0 +1,385 @@ +package protocol + +import ( + "time" + + "github.com/google/btree" + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget" +) + +const ( + MaxBlocksPerBlockSyncResponse int = 10 + + // Minimum delay between two consecutive BLOCK-SYNC-REQ requests + DeltaMinBlockSyncRequest = 10 * time.Millisecond + // Maximum delay between a BLOCK-SYNC-REQ and a BLOCK-SYNC response. We'll try + // with another oracle if we don't get a response in this time. 
+ DeltaMaxBlockSyncRequest time.Duration = 1 * time.Second + + // We are looking to pipeline fetches of a range of at most + // BlockSyncLookahead blocks at any given time + BlockSyncLookahead = 10_000 +) + +// Half-open range, i.e. [StartSeqNr, EndExclSeqNr) +type seqNrRange struct { + StartSeqNr uint64 + EndExclSeqNr uint64 +} + +type blockSyncState[RI any] struct { + logger commontypes.Logger + blockRequesterGadget *requestergadget.RequesterGadget[seqNrRange] + + sortedBlockBuffer *btree.BTreeG[AttestedStateTransitionBlock] +} + +func (stasy *stateSyncState[RI]) bufferBlock(block AttestedStateTransitionBlock) { + stasy.blockSyncState.sortedBlockBuffer.ReplaceOrInsert(block) +} + +func (stasy *stateSyncState[RI]) reapBlockBuffer() { + for { + minBlock, ok := stasy.blockSyncState.sortedBlockBuffer.Min() + if !ok { + return + } + + minBlockSeqNr := minBlock.StateTransitionBlock.SeqNr() + + if minBlockSeqNr <= stasy.highestPersistedStateTransitionBlockSeqNr { + stasy.blockSyncState.sortedBlockBuffer.DeleteMin() + } else { + break + } + } +} + +func (stasy *stateSyncState[RI]) getPendingBlocksToRequest() []seqNrRange { + switch stasy.syncMode { + case syncModeTree: + return nil + case syncModeUnknown: + return nil + case syncModeFetchSnapshotBlock: + if stasy.treeSyncState.targetSeqNr == 0 { + stasy.blockSyncState.logger.Critical("assumption violation: tree-sync target sequence number is 0", nil) + return nil + } + return []seqNrRange{{stasy.treeSyncState.targetSeqNr, stasy.treeSyncState.targetSeqNr + 1}} + case syncModeBlock: + } + + if stasy.highestHeardSeqNr <= stasy.highestPersistedStateTransitionBlockSeqNr { + // We are already synced. 
+ return nil + } + + var pending []seqNrRange + + stasy.reapBlockBuffer() + + lastSeqNr := stasy.highestPersistedStateTransitionBlockSeqNr + stasy.blockSyncState.sortedBlockBuffer.Ascend(func(astb AttestedStateTransitionBlock) bool { + seqNr := astb.StateTransitionBlock.SeqNr() + if lastSeqNr+1 < seqNr { + // [lastSeqNr+1..seqNr) (exclusive) is a gap to fill + + for rangeStartSeqNr := lastSeqNr + 1; rangeStartSeqNr < seqNr; rangeStartSeqNr += uint64(MaxBlocksPerBlockSyncResponse) { + rangeEndExclSeqNr := rangeStartSeqNr + uint64(MaxBlocksPerBlockSyncResponse) + if rangeEndExclSeqNr > seqNr { + rangeEndExclSeqNr = seqNr + } + pending = append(pending, seqNrRange{rangeStartSeqNr, rangeEndExclSeqNr}) + } + } + lastSeqNr = seqNr + return true + }) + + for rangeStartSeqNr := lastSeqNr + 1; rangeStartSeqNr <= stasy.highestPersistedStateTransitionBlockSeqNr+BlockSyncLookahead && rangeStartSeqNr <= stasy.highestHeardSeqNr; rangeStartSeqNr += uint64(MaxBlocksPerBlockSyncResponse) { + rangeEndExclSeqNr := rangeStartSeqNr + uint64(MaxBlocksPerBlockSyncResponse) + if rangeEndExclSeqNr > stasy.highestPersistedStateTransitionBlockSeqNr+BlockSyncLookahead { + rangeEndExclSeqNr = stasy.highestPersistedStateTransitionBlockSeqNr + BlockSyncLookahead + } + // no check for rangeEndExclSeqNr > stasy.highestHeardSeqNr, because there is no harm in asking for more than exists + pending = append(pending, seqNrRange{rangeStartSeqNr, rangeEndExclSeqNr}) + } + + return pending +} + +func (stasy *stateSyncState[RI]) getBlockSyncSeeders(_ seqNrRange) map[commontypes.OracleID]struct{} { + seeders := make(map[commontypes.OracleID]struct{}) + for oid := range stasy.oracles { + + if commontypes.OracleID(oid) == stasy.id { + continue + } + seeders[commontypes.OracleID(oid)] = struct{}{} + } + return seeders +} + +func (stasy *stateSyncState[RI]) sendBlockSyncRequest(seqNrRange seqNrRange, target commontypes.OracleID) (*requestergadget.RequestInfo, bool) { + stasy.blockSyncState.logger.Debug("sending 
MessageBlockSyncRequest", commontypes.LogFields{ + "seqNrRange": seqNrRange, + "target": target, + }) + msg := MessageBlockSyncRequest[RI]{ + nil, // TODO: consider using a sentinel value here, e.g. "EmptyRequestHandleForInboundResponse" + seqNrRange.StartSeqNr, + seqNrRange.EndExclSeqNr, + } + stasy.netSender.SendTo(msg, target) + return &requestergadget.RequestInfo{ + time.Now().Add(DeltaMaxBlockSyncRequest), + }, true +} + +func (stasy *stateSyncState[RI]) messageBlockSyncRequest(msg MessageBlockSyncRequest[RI], sender commontypes.OracleID) { + if !(msg.StartSeqNr < msg.EndExclSeqNr) { + stasy.blockSyncState.logger.Warn("dropping MessageBlockSyncRequest with invalid loSeqNr and hiSeqNr", commontypes.LogFields{ + "sender": sender, + "requestStartSeqNr": msg.StartSeqNr, + "requestEndExclSeqNr": msg.EndExclSeqNr, + }) + return + } + + stasy.blockSyncState.logger.Debug("received MessageBlockSyncRequest", commontypes.LogFields{ + "sender": sender, + "requestStartSeqNr": msg.StartSeqNr, + "requestEndExclSeqNr": msg.EndExclSeqNr, + }) + + var maxBlocksInResponse int + { + maxBlocksInResponseU64 := msg.EndExclSeqNr - msg.StartSeqNr + if maxBlocksInResponseU64 > uint64(MaxBlocksPerBlockSyncResponse) { + maxBlocksInResponseU64 = uint64(MaxBlocksPerBlockSyncResponse) + } + // now we are sure that maxBlocksInResponseU64 will fit an int + maxBlocksInResponse = int(maxBlocksInResponseU64) + } + + tx, err := stasy.kvDb.NewReadTransactionUnchecked() + if err != nil { + stasy.blockSyncState.logger.Error("failed to create read transaction", commontypes.LogFields{ + "error": err, + }) + return + } + defer tx.Discard() + + astbs, _, err := tx.ReadAttestedStateTransitionBlocks(msg.StartSeqNr, maxBlocksInResponse) + if err != nil { + stasy.blockSyncState.logger.Error("failed to read attested state transition blocks", commontypes.LogFields{ + "error": err, + }) + return + } + + for i, astb := range astbs { + seqNr := astb.StateTransitionBlock.SeqNr() + var expectedSeqNr uint64 + if i 
== 0 { + expectedSeqNr = msg.StartSeqNr + } else { + expectedSeqNr = astbs[i-1].StateTransitionBlock.SeqNr() + 1 + } + if seqNr != expectedSeqNr { + astbs = nil + break // do not produce gap + } + } + + if len(astbs) > 0 { + stasy.blockSyncState.logger.Debug("sending MessageBlockSyncResponse", commontypes.LogFields{ + "highestPersisted": stasy.highestPersistedStateTransitionBlockSeqNr, + "lowestPersisted": stasy.lowestPersistedStateTransitionBlockSeqNr, + "requestStartSeqNr": msg.StartSeqNr, + "requestEndExclSeqNr": msg.EndExclSeqNr, + "responseStartSeqNr": astbs[0].StateTransitionBlock.SeqNr(), + "responseEndExclSeqNr": astbs[len(astbs)-1].StateTransitionBlock.SeqNr() + 1, + "to": sender, + }) + stasy.netSender.SendTo(MessageBlockSyncResponse[RI]{ + msg.RequestHandle, + msg.StartSeqNr, + msg.EndExclSeqNr, + astbs, + }, sender) + } else { + stasy.blockSyncState.logger.Debug("no blocks to send, sending an empty MessageBlockSyncResponse to indicate go-away", commontypes.LogFields{ + "highestPersisted": stasy.highestPersistedStateTransitionBlockSeqNr, + "lowestPersisted": stasy.lowestPersistedStateTransitionBlockSeqNr, + "requestStartSeqNr": msg.StartSeqNr, + "requestEndExclSeqNr": msg.EndExclSeqNr, + "to": sender, + }) + stasy.netSender.SendTo(MessageBlockSyncResponse[RI]{ + msg.RequestHandle, + msg.StartSeqNr, + msg.EndExclSeqNr, + astbs, + }, sender) + } +} + +func (stasy *stateSyncState[RI]) messageBlockSyncResponse(msg MessageBlockSyncResponse[RI], sender commontypes.OracleID) { + requestSeqNrRange := seqNrRange{msg.RequestStartSeqNr, msg.RequestEndExclSeqNr} + + if !stasy.blockSyncState.blockRequesterGadget.CheckAndMarkResponse(requestSeqNrRange, sender) { + stasy.blockSyncState.logger.Warn("dropping MessageBlockSyncResponse, not allowed", commontypes.LogFields{ + "sender": sender, + "requestSeqNrRange": requestSeqNrRange, + }) + return + } + + if len(msg.AttestedStateTransitionBlocks) == 0 { + stasy.blockSyncState.logger.Debug("dropping 
MessageBlockSyncResponse, go-away", commontypes.LogFields{ + "sender": sender, + "requestSeqNrRange": requestSeqNrRange, + }) + stasy.blockSyncState.blockRequesterGadget.MarkGoAwayResponse(requestSeqNrRange, sender) + return + } + + stasy.blockSyncState.logger.Debug("received MessageBlockSyncResponse", commontypes.LogFields{ + "sender": sender, + }) + + switch stasy.syncMode { + case syncModeFetchSnapshotBlock: + break + case syncModeBlock: + break + case syncModeTree: + return + case syncModeUnknown: + return + } + + for i, astb := range msg.AttestedStateTransitionBlocks { + if astb.StateTransitionBlock.SeqNr() != msg.RequestStartSeqNr+uint64(i) { + stasy.blockSyncState.logger.Warn("dropping MessageBlockSyncResponse with out of order state transition blocks", commontypes.LogFields{ + "sender": sender, + "requestStartSeqNr": msg.RequestStartSeqNr, + "requestEndExclSeqNr": msg.RequestEndExclSeqNr, + "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), + }) + stasy.blockSyncState.blockRequesterGadget.MarkBadResponse(requestSeqNrRange, sender) + return + } + + if !(astb.StateTransitionBlock.SeqNr() < msg.RequestEndExclSeqNr) { + stasy.blockSyncState.logger.Warn("dropping MessageBlockSyncResponse with state transition block seqNr that is too large", commontypes.LogFields{ + "sender": sender, + "requestStartSeqNr": msg.RequestStartSeqNr, + "requestEndExclSeqNr": msg.RequestEndExclSeqNr, + "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), + }) + stasy.blockSyncState.blockRequesterGadget.MarkBadResponse(requestSeqNrRange, sender) + return + } + + if err := astb.Verify(stasy.config.ConfigDigest, stasy.config.OracleIdentities, stasy.config.ByzQuorumSize()); err != nil { + stasy.blockSyncState.logger.Warn("dropping MessageBlockSyncResponse with invalid attestation", commontypes.LogFields{ + "sender": sender, + "requestStartSeqNr": msg.RequestStartSeqNr, + "requestEndExclSeqNr": msg.RequestEndExclSeqNr, + "stateTransitionBlockSeqNr": 
astb.StateTransitionBlock.SeqNr(), + "error": err, + }) + stasy.blockSyncState.blockRequesterGadget.MarkBadResponse(requestSeqNrRange, sender) + return + } + } + + stasy.blockSyncState.blockRequesterGadget.MarkGoodResponse(requestSeqNrRange, sender) + + if stasy.syncMode == syncModeFetchSnapshotBlock { + err := stasy.acceptTreeSyncTargetBlockFromBlockSync(msg.AttestedStateTransitionBlocks[0]) + if err != nil { + stasy.blockSyncState.logger.Error("error accepting tree-sync target block from block sync, will try again", commontypes.LogFields{ + "error": err, + }) + } + return + } + + for _, astb := range msg.AttestedStateTransitionBlocks { + stasy.blockSyncState.logger.Debug("buffering state transition block", commontypes.LogFields{ + "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), + }) + stasy.bufferBlock(astb) + } + + stasy.tryCompleteBlockSync() +} + +func (stasy *stateSyncState[RI]) tryCompleteBlockSync() { + + stasy.refreshStateSyncState() + + stasy.reapBlockBuffer() + + minBlock, ok := stasy.blockSyncState.sortedBlockBuffer.Min() + if !ok { + return + } + if minBlock.StateTransitionBlock.SeqNr() != stasy.highestPersistedStateTransitionBlockSeqNr+1 { + return + } + + tx, err := stasy.kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + stasy.blockSyncState.logger.Error("failed to create read transaction", commontypes.LogFields{ + "error": err, + }) + return + } + defer tx.Discard() + + lastSeqNr := stasy.highestPersistedStateTransitionBlockSeqNr + for { + astb, ok := stasy.blockSyncState.sortedBlockBuffer.Min() + if !ok { + break + } + seqNr := astb.StateTransitionBlock.SeqNr() + + if seqNr != lastSeqNr+1 { + break + } + + stasy.blockSyncState.logger.Debug("writing state transition block", commontypes.LogFields{ + "stateTransitionBlockSeqNr": seqNr, + }) + + err := tx.WriteAttestedStateTransitionBlock(seqNr, astb) + if err != nil { + stasy.blockSyncState.logger.Error("error writing state transition block", 
commontypes.LogFields{ + "stateTransitionBlockSeqNr": astb.StateTransitionBlock.SeqNr(), + "error": err, + }) + return + } + + lastSeqNr = seqNr + stasy.blockSyncState.sortedBlockBuffer.DeleteMin() + } + + err = tx.Commit() + if err != nil { + stasy.blockSyncState.logger.Error("error committing transaction", commontypes.LogFields{ + "error": err, + }) + return + } + stasy.highestPersistedStateTransitionBlockSeqNr = lastSeqNr + stasy.pleaseTryToReplayBlock() +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block_replay.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block_replay.go new file mode 100644 index 00000000..fdec85bb --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_block_replay.go @@ -0,0 +1,139 @@ +package protocol + +import ( + "context" + "fmt" + "time" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" +) + +const ( + stateBlockReplayInterval = 10 * time.Second + stateBlockReplayFastFollowOnError = stateBlockReplayInterval / 10 + + maxBlocksToReplayInOneGo = 100 +) + +func tryReplay(ctx context.Context, kvDb KeyValueDatabase, logger loghelper.LoggerWithContext) error { + kvReadTxn, err := kvDb.NewReadTransactionUnchecked() + if err != nil { + return fmt.Errorf("failed to create read transaction") + } + defer kvReadTxn.Discard() + + committedSeqNr, err := kvReadTxn.ReadHighestCommittedSeqNr() + if err != nil { + return fmt.Errorf("failed to read highest committed seq nr: %w", err) + } + + for { + astbsToReplay, more, err := getReplayableBlocks(kvReadTxn, committedSeqNr) + if err != nil { + return fmt.Errorf("failed to get blocks to replay: %w", err) + } + + for _, astb := range astbsToReplay { + block := astb.StateTransitionBlock + seqNr := block.SeqNr() + + logger.Trace("StateBlockReplay: trying to replay block", commontypes.LogFields{ + "seqNr": seqNr, + }) + + err := func() error { + tx, err := 
kvDb.NewSerializedReadWriteTransaction(seqNr) + if err != nil { + return fmt.Errorf("failed to create kv read/write transaction: %w", err) + } + defer tx.Discard() + + // next block found, has been verified before being persisted so we don't check again + err = replayVerifiedBlock(logger, tx, &block) + if err != nil { + return fmt.Errorf("failed to replay verified block %d: %w", seqNr, err) + } + err = tx.Commit() + if err != nil { + return fmt.Errorf("failed to commit transaction: %w", err) + } + return nil + }() + if err != nil { + return fmt.Errorf("failed to replay block %d: %w", seqNr, err) + } + logger.Debug("StateBlockReplay: 🐌✅ committed", commontypes.LogFields{ + "seqNr": seqNr, + }) + committedSeqNr = seqNr + } + + if !more { + break + } + if ctx.Err() != nil { + return ctx.Err() + } + } + return nil +} + +func getReplayableBlocks(kvReadTxn KeyValueDatabaseReadTransaction, committedSeqNr uint64) ([]AttestedStateTransitionBlock, bool, error) { + blocks, more, err := kvReadTxn.ReadAttestedStateTransitionBlocks(committedSeqNr+1, maxBlocksToReplayInOneGo) + if err != nil { + return nil, false, fmt.Errorf("failed to read attested state transition blocks: %w", err) + } + return blocks, more, nil +} + +func replayVerifiedBlock(logger loghelper.LoggerWithContext, kvReadWriteTxn KeyValueDatabaseReadWriteTransaction, stb *StateTransitionBlock) error { + seqNr := stb.SeqNr() + logger = logger.MakeChild(commontypes.LogFields{ + "replay": "YES", + "seqNr": seqNr, + }) + + logger.Trace("replaying state transition block", nil) + + stateRootDigest, err := kvReadWriteTxn.ApplyWriteSet(stb.StateTransitionOutputs.WriteSet) + if err != nil { + return fmt.Errorf("failed to apply write set for seq nr %d: %w", seqNr, err) + } + + if stateRootDigest != stb.StateRootDigest { + return fmt.Errorf("state root digest mismatch from block replay for seq nr %d: expected %s, actual %s", seqNr, stb.StateRootDigest, stateRootDigest) + } + + return nil +} + +func RunStateSyncBlockReplay( + 
ctx context.Context, + logger loghelper.LoggerWithContext, + kvDb KeyValueDatabase, + chNotificationFromStateSync <-chan struct{}, +) { + chDone := ctx.Done() + chTick := time.After(0) + + for { + select { + case <-chTick: + case <-chNotificationFromStateSync: + case <-chDone: + return + } + + logger.Trace("StateBlockReplay: calling tryReplay", nil) + err := tryReplay(ctx, kvDb, logger) + if err != nil { + logger.Warn("StateBlockReplay: failed while trying to replay blocks", commontypes.LogFields{ + "error": err, + }) + chTick = time.After(stateBlockReplayFastFollowOnError) + } else { + chTick = time.After(stateBlockReplayInterval) + } + } +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_destroy_if_needed.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_destroy_if_needed.go new file mode 100644 index 00000000..5839f406 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_destroy_if_needed.go @@ -0,0 +1,120 @@ +package protocol + +import ( + "context" + "fmt" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" +) + +const ( + maxStateKeysToDestroyInSingleTransaction = 1_000_000 +) + +type destroyStateIfNeededResult int + +const ( + _ destroyStateIfNeededResult = iota + destroyStateIfNeededResultDone + destroyStateIfNeededResultDoneButNeedMore + destroyStateIfNeededResultNotNeeded + destroyStateIfNeededResultError +) + +func destroyStateIfNeeded(kvDb KeyValueDatabase, logger commontypes.Logger) (destroyStateIfNeededResult, error) { + tx, err := kvDb.NewSerializedReadWriteTransactionUnchecked() + if err != nil { + return destroyStateIfNeededResultError, fmt.Errorf("failed to create read/write transaction: %w", err) + } + defer tx.Discard() + + treeSyncStatus, err := tx.ReadTreeSyncStatus() + if err != nil { + return destroyStateIfNeededResultError, fmt.Errorf("failed to read tree sync status: %w", err) + } + + logger.Info("StateDestroyIfNeeded: 
read tree sync status", commontypes.LogFields{ + "treeSyncStatus": treeSyncStatus, + }) + + if treeSyncStatus.Phase != TreeSyncPhaseWaiting { + return destroyStateIfNeededResultNotNeeded, nil + } + + done, err := tx.DestructiveDestroyForTreeSync(maxStateKeysToDestroyInSingleTransaction) + if err != nil { + return destroyStateIfNeededResultError, fmt.Errorf("failed to delete everything but tree sync status: %w", err) + } + + if done { + err := tx.WriteTreeSyncStatus(TreeSyncStatus{ + TreeSyncPhaseActive, + treeSyncStatus.TargetSeqNr, + treeSyncStatus.TargetStateRootDigest, + treeSyncStatus.PendingKeyDigestRanges, + }) + if err != nil { + return destroyStateIfNeededResultError, fmt.Errorf("failed to write tree sync status after being done destroying state: %w", err) + } + } + + if err := tx.Commit(); err != nil { + return destroyStateIfNeededResultError, fmt.Errorf("failed to commit transaction: %w", err) + } + + if done { + return destroyStateIfNeededResultDone, nil + } else { + return destroyStateIfNeededResultDoneButNeedMore, nil + } +} + +func RunStateSyncDestroyIfNeeded( + ctx context.Context, + logger loghelper.LoggerWithContext, + kvDb KeyValueDatabase, + chNotificationFromStateSync <-chan struct{}, +) { + logger = logger.MakeChild(commontypes.LogFields{"proto": "stateSyncDestroyIfNeeded"}) + + chDone := ctx.Done() + + for { + select { + case <-chNotificationFromStateSync: + case <-chDone: + return + } + + for { + logger.Trace("RunStateSyncDestroyIfNeeded: destroying state if needed...", nil) + destroyStateIfNeededResult, err := destroyStateIfNeeded(kvDb, logger) + + followupImmediately := false + switch destroyStateIfNeededResult { + case destroyStateIfNeededResultDone: + logger.Info("RunStateSyncDestroyIfNeeded: destroyed state 💣", nil) + case destroyStateIfNeededResultDoneButNeedMore: + logger.Debug("RunStateSyncDestroyIfNeeded: destroyed state, but need more", nil) + followupImmediately = true + case destroyStateIfNeededResultNotNeeded: + 
logger.Trace("RunStateSyncDestroyIfNeeded: not needed to destroy state", nil) + case destroyStateIfNeededResultError: + logger.Warn("RunStateSyncDestroyIfNeeded: failed to destroy state", commontypes.LogFields{ + "error": err, + }) + } + + if !followupImmediately { + break + } + + select { + case <-chDone: + return + default: + } + } + } +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_reap.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_reap.go new file mode 100644 index 00000000..9dbb63b2 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_reap.go @@ -0,0 +1,216 @@ +package protocol + +import ( + "context" + "fmt" + "time" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config" +) + +const ( + stateReapInterval = 10 * time.Second + stateReapFastFollowOnError = 120 * time.Millisecond + + maxBlocksToReapInOneGo = 100_000 + maxTreeNodesToReapInOneGo = 10_000 + maxTreeRootsToReapInOneGo = 100_000 +) + +func reapState(ctx context.Context, kvDb KeyValueDatabase, logger commontypes.Logger) (done bool, err error) { + + tx, err := kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + return false, fmt.Errorf("failed to create read/write transaction: %w", err) + } + defer tx.Discard() + + treeSyncStatus, err := tx.ReadTreeSyncStatus() + if err != nil { + return false, fmt.Errorf("failed to read tree sync status: %w", err) + } + if treeSyncStatus.Phase != TreeSyncPhaseInactive { + return false, fmt.Errorf("tree sync is not inactive") + } + highestCommittedSeqNr, err := tx.ReadHighestCommittedSeqNr() + if err != nil { + return false, fmt.Errorf("failed to read highest committed seq nr: %w", err) + } + + lowestPersistedSeqNr, err := tx.ReadLowestPersistedSeqNr() + if err != nil { + return false, fmt.Errorf("failed to read lowest persisted seq nr: 
%w", err) + } + + desiredLowestPersistedSeqNr := desiredLowestPersistedSeqNr(highestCommittedSeqNr) + if desiredLowestPersistedSeqNr > lowestPersistedSeqNr { + logger.Info("RunStateSyncReap: new lowest persisted seq nr", commontypes.LogFields{ + "desiredLowestPersistedSeqNr": desiredLowestPersistedSeqNr, + "lowestPersistedSeqNr": lowestPersistedSeqNr, + }) + + // write new lowest persisted seq nr first + if err := tx.WriteLowestPersistedSeqNr(desiredLowestPersistedSeqNr); err != nil { + return false, fmt.Errorf("failed to write lowest persisted seq nr: %w", err) + } + if err := tx.Commit(); err != nil { + return false, fmt.Errorf("failed to commit transaction: %w", err) + } + } else { + tx.Discard() + } + + // Reap unneeded blocks + + logger.Info("RunStateSyncReap: reaping blocks", commontypes.LogFields{ + "desiredLowestPersistedSeqNr": desiredLowestPersistedSeqNr, + "lowestPersistedSeqNr": lowestPersistedSeqNr, + }) + + for { + done, err := reapBlocks(kvDb, desiredLowestPersistedSeqNr) + if err != nil { + return false, fmt.Errorf("failed to reap blocks: %w", err) + } + if ctx.Err() != nil { + return false, ctx.Err() + } + if done { + break + } + } + + // Reap unneeded tree nodes + + logger.Info("RunStateSyncReap: reaping stale nodes from tree", commontypes.LogFields{ + "desiredLowestPersistedSeqNr": desiredLowestPersistedSeqNr, + }) + + for { + done, err := reapTreeNodes(kvDb, desiredLowestPersistedSeqNr) + if err != nil { + return false, fmt.Errorf("failed to reap tree nodes: %w", err) + } + if ctx.Err() != nil { + return false, ctx.Err() + } + if done { + break + } + } + + logger.Info("RunStateSyncReap: reaping stale roots", commontypes.LogFields{ + "desiredLowestPersistedSeqNr": desiredLowestPersistedSeqNr, + }) + + for { + done, err := reapTreeRoots(kvDb, desiredLowestPersistedSeqNr) + if err != nil { + return false, fmt.Errorf("failed to reap tree roots: %w", err) + } + if ctx.Err() != nil { + return false, ctx.Err() + } + if done { + break + } + } + + return 
true, nil +} + +func reapBlocks(kvDb KeyValueDatabase, desiredLowestPersistedSeqNr uint64) (done bool, err error) { + tx, err := kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + return false, fmt.Errorf("failed to create read/write transaction: %w", err) + } + defer tx.Discard() + + if desiredLowestPersistedSeqNr == 0 { + return true, nil + } + + done, err = tx.DeleteAttestedStateTransitionBlocks(desiredLowestPersistedSeqNr-1, maxBlocksToReapInOneGo) + if err != nil { + return false, fmt.Errorf("failed to delete stale blocks: %w", err) + } + err = tx.Commit() + if err != nil { + return false, fmt.Errorf("failed to commit transaction: %w", err) + } + + return done, nil +} + +func reapTreeNodes(kvDb KeyValueDatabase, desiredLowestPersistedSeqNr uint64) (done bool, err error) { + tx, err := kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + return false, fmt.Errorf("failed to create read/write transaction: %w", err) + } + defer tx.Discard() + + done, err = tx.DeleteStaleNodes(RootVersion(desiredLowestPersistedSeqNr), maxTreeNodesToReapInOneGo) + if err != nil { + return false, fmt.Errorf("failed to delete stale nodes: %w", err) + } + err = tx.Commit() + if err != nil { + return false, fmt.Errorf("failed to commit transaction: %w", err) + } + + return done, nil +} + +func reapTreeRoots(kvDb KeyValueDatabase, desiredLowestPersistedSeqNr uint64) (done bool, err error) { + tx, err := kvDb.NewUnserializedReadWriteTransactionUnchecked() + if err != nil { + return false, fmt.Errorf("failed to create read/write transaction: %w", err) + } + defer tx.Discard() + + done, err = tx.DeleteRoots(RootVersion(desiredLowestPersistedSeqNr), maxTreeRootsToReapInOneGo) + if err != nil { + return false, fmt.Errorf("failed to delete roots: %w", err) + } + err = tx.Commit() + if err != nil { + return false, fmt.Errorf("failed to commit transaction: %w", err) + } + + return done, nil +} + +func RunStateSyncReap( + ctx context.Context, + config 
ocr3config.SharedConfig, + logger loghelper.LoggerWithContext, + database Database, + kvDb KeyValueDatabase, +) { + chDone := ctx.Done() + chTick := time.After(0) + + for { + select { + case <-chTick: + case <-chDone: + return + } + + logger.Info("RunStateSyncReap: calling reapState", nil) + done, err := reapState(ctx, kvDb, logger) + if err != nil { + logger.Warn("RunStateSyncReap: failed to reap state. Will retry soon.", commontypes.LogFields{ + "error": err, + "waitBeforeRetry": stateReapFastFollowOnError.String(), + }) + chTick = time.After(stateReapFastFollowOnError) + } else if !done { + chTick = time.After(0) + } else { + chTick = time.After(stateReapInterval) + } + } +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_snapshot.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_snapshot.go new file mode 100644 index 00000000..d8b6da64 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_snapshot.go @@ -0,0 +1,52 @@ +package protocol + +const ( + SnapshotInterval = 128 + // MaxHistoricalSnapshotsRetained must be a non-zero value, denoting the + // number of complete snapshots prior to the current (potentially + // incomplete) one that will be retained to help other oracles with state + // sync. All blocks starting from the highest block of the earliest retained + // snapshot will be retained. 
	MaxHistoricalSnapshotsRetained = 64
)

// snapshotIndexFromSeqNr returns the index of the snapshot covering seqNr,
// i.e. ceil(seqNr / SnapshotInterval). Sequence number 0 maps to snapshot
// index 0.
func snapshotIndexFromSeqNr(seqNr uint64) uint64 {
	if seqNr == 0 {
		return 0
	}
	// Ceiling division. NOTE(review): assumes seqNr stays well below
	// 2^64 - SnapshotInterval so the addition cannot wrap — confirm.
	return (seqNr + SnapshotInterval - 1) / SnapshotInterval
}

// maxSeqNrWithSnapshotIndex returns the highest sequence number covered by
// the snapshot with the given index (the seqNr at which that snapshot is
// taken); index 0 maps to seqNr 0.
func maxSeqNrWithSnapshotIndex(snapshotIndex uint64) uint64 {
	if snapshotIndex == 0 {
		return 0
	}
	return snapshotIndex * SnapshotInterval
}

// desiredLowestPersistedSeqNr returns the lowest sequence number we want to
// keep persisted: the highest seqNr of the earliest of the
// MaxHistoricalSnapshotsRetained snapshots retained below the snapshot
// covering highestCommittedSeqNr. Returns 0 when fewer snapshots exist.
func desiredLowestPersistedSeqNr(highestCommittedSeqNr uint64) uint64 {
	highestSnapshotIndex := snapshotIndexFromSeqNr(highestCommittedSeqNr)
	var lowestDesiredSnapshotIndex uint64
	if highestSnapshotIndex > MaxHistoricalSnapshotsRetained {
		lowestDesiredSnapshotIndex = highestSnapshotIndex - MaxHistoricalSnapshotsRetained
	} else {
		lowestDesiredSnapshotIndex = 0
	}
	return maxSeqNrWithSnapshotIndex(lowestDesiredSnapshotIndex)
}

// snapshotSeqNr rounds seqNr up to the sequence number of the snapshot that
// covers it (the next multiple of SnapshotInterval; 0 stays 0).
func snapshotSeqNr(seqNr uint64) uint64 {
	return maxSeqNrWithSnapshotIndex(snapshotIndexFromSeqNr(seqNr))
}

// PrevRootVersion returns the version number of the JMT root referring to the
// state as of seqNr - 1. This is used as the "old version" for writing the
// modifications of seqNr. We only maintain trees with versions that are
// multiples of SnapshotInterval.
// NOTE(review): callers must pass seqNr >= 1; seqNr == 0 would wrap around in
// the seqNr - 1 below — confirm all call sites.
+func PrevRootVersion(seqNr uint64) uint64 { + return snapshotSeqNr(seqNr - 1) +} + +func RootVersion(seqNr uint64) uint64 { + return snapshotSeqNr(seqNr) +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree.go new file mode 100644 index 00000000..daae7254 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree.go @@ -0,0 +1,589 @@ +package protocol + +import ( + "bytes" + "fmt" + "time" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/jmt" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol/requestergadget" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" +) + +const ( + // Maximum delay between a TREE-SYNC-REQ and TREE-SYNC-CHUNK response. We'll try + // with another oracle if we don't get a response in this time. + DeltaMaxTreeSyncRequest time.Duration = 1 * time.Second + // Minimum delay between two consecutive BLOCK-SYNC-REQ requests + DeltaMinTreeSyncRequest = 10 * time.Millisecond + + // The maximum number of key-value pairs that an oracle will send in a single tree-sync chunk + MaxTreeSyncChunkKeys = 128 + // Maximum number of bytes in of the combined keys and values length in a chunk. 
+ + MaxTreeSyncChunkKeysPlusValuesLength = 2 * (ocr3_1types.MaxMaxKeyValueKeyLength + ocr3_1types.MaxMaxKeyValueValueLength) + + MaxMaxParallelTreeSyncChunkFetches = 8 +) + +func (stasy *stateSyncState[RI]) maxParallelTreeSyncChunkFetches() int { + return max(1, min(MaxMaxParallelTreeSyncChunkFetches, stasy.config.N()-1)) +} + +func (stasy *stateSyncState[RI]) newPendingKeyDigestRanges() PendingKeyDigestRanges { + numSplits := stasy.maxParallelTreeSyncChunkFetches() + split, err := splitKeyDigestSpaceEvenly(numSplits) + if err != nil { + stasy.logger.Error("failed to create even key digest range split, reverting to a single range", commontypes.LogFields{ + "numSplits": numSplits, + "error": err, + }) + return NewPendingKeyDigestRanges([]KeyDigestRange{{jmt.MinDigest, jmt.MaxDigest}}) + } + return NewPendingKeyDigestRanges(split) +} + +type treeSyncChunkRequestItem struct { + targetSeqNr uint64 + keyDigestRange KeyDigestRange +} + +type treeSyncState[RI any] struct { + logger commontypes.Logger + treeChunkRequesterGadget *requestergadget.RequesterGadget[treeSyncChunkRequestItem] + + treeSyncPhase TreeSyncPhase + targetSeqNr uint64 + targetStateRootDigest StateRootDigest + + pendingKeyDigestRanges PendingKeyDigestRanges +} + +func (stasy *stateSyncState[RI]) sendTreeSyncChunkRequest(item treeSyncChunkRequestItem, target commontypes.OracleID) (*requestergadget.RequestInfo, bool) { + if stasy.syncMode != syncModeTree || stasy.treeSyncState.treeSyncPhase != TreeSyncPhaseActive || stasy.treeSyncState.targetStateRootDigest == (StateRootDigest{}) { + return nil, false + } + stasy.treeSyncState.logger.Debug("sending MessageTreeSyncChunkRequest", commontypes.LogFields{ + "targetSeqNr": item.targetSeqNr, + "keyDigestRange": item.keyDigestRange, + "target": target, + }) + msg := MessageTreeSyncChunkRequest[RI]{ + nil, + item.targetSeqNr, + item.keyDigestRange.StartIndex, + item.keyDigestRange.EndInclIndex, + } + stasy.netSender.SendTo(msg, target) + return 
&requestergadget.RequestInfo{ + time.Now().Add(DeltaMaxTreeSyncRequest), + }, true +} + +func (stasy *stateSyncState[RI]) getPendingTreeSyncChunksToRequest() []treeSyncChunkRequestItem { + if stasy.syncMode != syncModeTree || stasy.treeSyncState.treeSyncPhase != TreeSyncPhaseActive || stasy.treeSyncState.targetStateRootDigest == (StateRootDigest{}) { + return nil + } + var pending []treeSyncChunkRequestItem + for _, keyDigestRange := range stasy.treeSyncState.pendingKeyDigestRanges.All() { + pending = append(pending, treeSyncChunkRequestItem{stasy.treeSyncState.targetSeqNr, keyDigestRange}) + } + return pending +} + +func (stasy *stateSyncState[RI]) getTreeSyncChunkSeeders(_ treeSyncChunkRequestItem) map[commontypes.OracleID]struct{} { + seeders := make(map[commontypes.OracleID]struct{}) + for oid := range stasy.oracles { + + if commontypes.OracleID(oid) == stasy.id { + continue + } + seeders[commontypes.OracleID(oid)] = struct{}{} + } + return seeders +} + +func (stasy *stateSyncState[RI]) evolveTreeSyncPhase() { + if stasy.syncMode != syncModeTree { + return + } + + if !stasy.refreshStateSyncState() { + return + } + + stasy.treeSyncState.logger.Debug("trying to evolve tree-sync phase", commontypes.LogFields{ + "phase": stasy.treeSyncState.treeSyncPhase, + "targetSeqNr": stasy.treeSyncState.targetSeqNr, + }) + + switch stasy.treeSyncState.treeSyncPhase { + case TreeSyncPhaseInactive: + newTargetSeqNr, found := stasy.pickSomeTreeSyncTarget() + if !found { + return + } + stasy.treeSyncState.logger.Debug("initializing new tree-sync", commontypes.LogFields{ + "newTargetSeqNr": newTargetSeqNr, + }) + + newTreeSyncStatus := TreeSyncStatus{ + TreeSyncPhaseWaiting, + newTargetSeqNr, + StateRootDigest{}, + stasy.newPendingKeyDigestRanges(), + } + + kvReadWriteTxn, err := stasy.kvDb.NewSerializedReadWriteTransactionUnchecked() + if err != nil { + return + } + defer kvReadWriteTxn.Discard() + + if err := kvReadWriteTxn.WriteTreeSyncStatus(newTreeSyncStatus); err != nil { + 
stasy.treeSyncState.logger.Error("failed to write tree-sync status", commontypes.LogFields{ + "err": err, + }) + return + } + if err := kvReadWriteTxn.Commit(); err != nil { + stasy.treeSyncState.logger.Error("failed to commit", commontypes.LogFields{ + "err": err, + }) + return + } + stasy.refreshStateSyncState() + return + case TreeSyncPhaseWaiting: + stasy.treeSyncState.logger.Debug("tree-sync waiting for key-value store cleanup 🧹", nil) + stasy.pleaseDestroyStateIfNeeded() + return + case TreeSyncPhaseActive: + if stasy.needToRetargetTreeSync() { + stasy.treeSyncState.logger.Debug("not enough oracles to help us tree-sync to current target, we must re-target", commontypes.LogFields{ + "targetSeqNr": stasy.treeSyncState.targetSeqNr, + }) + + newTargetSeqNr, found := stasy.pickSomeTreeSyncTarget() + if !found { + return + } + + stasy.treeSyncState.logger.Debug("tree-sync needed to re-target, and we found a new target", commontypes.LogFields{ + "targetSeqNr": stasy.treeSyncState.targetSeqNr, + "newTargetSeqNr": newTargetSeqNr, + }) + + newTreeSyncStatus := TreeSyncStatus{ + TreeSyncPhaseWaiting, + newTargetSeqNr, + StateRootDigest{}, + stasy.newPendingKeyDigestRanges(), + } + + kvReadWriteTxn, err := stasy.kvDb.NewSerializedReadWriteTransactionUnchecked() + if err != nil { + return + } + defer kvReadWriteTxn.Discard() + + if err := kvReadWriteTxn.WriteTreeSyncStatus(newTreeSyncStatus); err != nil { + stasy.treeSyncState.logger.Error("failed to write tree-sync status", commontypes.LogFields{ + "err": err, + }) + return + } + if err := kvReadWriteTxn.Commit(); err != nil { + stasy.treeSyncState.logger.Error("failed to commit", commontypes.LogFields{ + "err": err, + }) + return + } + stasy.refreshStateSyncState() + return + } else { + // our target seq nr is fine, and we are active + // yield to block sync to fetch the target state root digest if necessary + if stasy.treeSyncState.targetStateRootDigest == (StateRootDigest{}) { + 
stasy.treeSyncState.logger.Debug("tree-sync yielding to block-sync to fetch the target state root digest", commontypes.LogFields{ + "targetSeqNr": stasy.treeSyncState.targetSeqNr, + }) + stasy.treeSyncNeedsSnapshotBlock() + return + } + + } + } +} + +func (stasy *stateSyncState[RI]) acceptTreeSyncTargetBlockFromBlockSync(block AttestedStateTransitionBlock) error { + if stasy.syncMode != syncModeFetchSnapshotBlock { + return fmt.Errorf("not in fetch snapshot block mode") + } + + if !stasy.refreshStateSyncState() { + return fmt.Errorf("not accepting block without refreshed state") + } + + if stasy.treeSyncState.treeSyncPhase != TreeSyncPhaseActive { + return fmt.Errorf("not accepting block in unexpected tree-sync phase %v", stasy.treeSyncState.treeSyncPhase) + } + + seqNr := block.StateTransitionBlock.SeqNr() + if seqNr != stasy.treeSyncState.targetSeqNr { + return fmt.Errorf("tree-sync target block sequence number does not match expected target sequence number") + } + + stateRootDigest := block.StateTransitionBlock.StateRootDigest + + kvReadWriteTxn, err := stasy.kvDb.NewSerializedReadWriteTransactionUnchecked() + if err != nil { + return fmt.Errorf("failed to create kv read/write transaction: %w", err) + } + defer kvReadWriteTxn.Discard() + if err = kvReadWriteTxn.WriteTreeSyncStatus(TreeSyncStatus{ + stasy.treeSyncState.treeSyncPhase, + stasy.treeSyncState.targetSeqNr, + stateRootDigest, + stasy.treeSyncState.pendingKeyDigestRanges, + }); err != nil { + return fmt.Errorf("failed to write tree-sync status: %w", err) + } + + if err = kvReadWriteTxn.WriteAttestedStateTransitionBlock(seqNr, block); err != nil { + return fmt.Errorf("failed to write attested state transition block: %w", err) + } + + if err = kvReadWriteTxn.Commit(); err != nil { + return fmt.Errorf("failed to commit: %w", err) + } + + stasy.treeSyncState.logger.Debug("tree-sync accepted verified state root digest", commontypes.LogFields{ + "targetSeqNr": seqNr, + "rootDigest": stateRootDigest, + }) + + 
stasy.syncMode = syncModeTree + stasy.refreshStateSyncState() + stasy.treeSyncState.treeChunkRequesterGadget.PleaseRecheckPendingItems() + return nil +} + +func (stasy *stateSyncState[RI]) messageTreeSyncChunkRequest(msg MessageTreeSyncChunkRequest[RI], sender commontypes.OracleID) { + stasy.treeSyncState.logger.Debug("received MessageTreeSyncChunkRequest", commontypes.LogFields{ + "sender": sender, + "toSeqNr": msg.ToSeqNr, + "startIndex": msg.StartIndex, + }) + + if !mustTakeSnapshot(msg.ToSeqNr) { + stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkRequest with invalid SeqNr", commontypes.LogFields{ + "toSeqNr": msg.ToSeqNr, + }) + return + } + + kvReadTxn, err := stasy.kvDb.NewReadTransactionUnchecked() + if err != nil { + stasy.logger.Warn("failed to create new transaction", commontypes.LogFields{ + "error": err, + }) + return + } + defer kvReadTxn.Discard() + + highestCommittedSeqNr, err := kvReadTxn.ReadHighestCommittedSeqNr() + if err != nil { + stasy.logger.Warn("failed to read highest committed seq nr", commontypes.LogFields{ + "error": err, + }) + return + } + + lowestPersistedSeqNr, err := kvReadTxn.ReadLowestPersistedSeqNr() + if err != nil { + stasy.logger.Warn("failed to read lowest persisted seq nr", commontypes.LogFields{ + "error": err, + }) + return + } + + treeSyncStatus, err := kvReadTxn.ReadTreeSyncStatus() + if err != nil { + stasy.logger.Warn("failed to read tree sync status", commontypes.LogFields{ + "error": err, + }) + return + } + + if treeSyncStatus.Phase != TreeSyncPhaseInactive || !(lowestPersistedSeqNr <= msg.ToSeqNr && msg.ToSeqNr <= highestCommittedSeqNr) { + kvReadTxn.Discard() + stasy.treeSyncState.logger.Debug("sending MessageTreeSyncChunkResponse to go-away", commontypes.LogFields{ + "sender": sender, + "toSeqNr": msg.ToSeqNr, + "lowestPersistedSeqNr": lowestPersistedSeqNr, + "highestCommittedSeqNr": highestCommittedSeqNr, + "treeSyncPhase": treeSyncStatus.Phase, + }) + 
stasy.netSender.SendTo(MessageTreeSyncChunkResponse[RI]{
			msg.RequestHandle,
			msg.ToSeqNr,
			msg.StartIndex,
			// Echo the requested end index (the RequestEndInclIndex field)
			// so the requester can match this go-away against its pending
			// item: the receive path reconstructs the item from
			// (ToSeqNr, StartIndex, RequestEndInclIndex) before calling
			// CheckAndMarkResponse. A zero digest here (the previous value)
			// fails that match and the go-away is silently dropped.
			msg.EndInclIndex,
			true,
			jmt.Digest{},
			nil,
			nil,
		}, sender)
		return
	}

	// Read the requested chunk of key-value pairs together with the bounding
	// leaves that prove its position under the tree at msg.ToSeqNr.
	endInclIndex, boundingLeaves, keyValues, err := kvReadTxn.ReadTreeSyncChunk(
		msg.ToSeqNr,
		msg.StartIndex,
		msg.EndInclIndex,
	)
	if err != nil {
		stasy.treeSyncState.logger.Warn("failed to read chunk", commontypes.LogFields{
			"sender":     sender,
			"ToSeqNr":    msg.ToSeqNr,
			"startIndex": msg.StartIndex,
			"err":        err,
		})
		return
	}

	// endInclIndex may be smaller than msg.EndInclIndex if the chunk was
	// truncated; the requester tracks the remainder as still pending.
	chunk := MessageTreeSyncChunkResponse[RI]{
		msg.RequestHandle,
		msg.ToSeqNr,
		msg.StartIndex,
		msg.EndInclIndex,
		false,
		endInclIndex,
		keyValues,
		boundingLeaves,
	}

	stasy.treeSyncState.logger.Debug("sent MessageTreeSyncChunkResponse", commontypes.LogFields{
		"target":         sender,
		"toSeqNr":        msg.ToSeqNr,
		"startIndex":     fmt.Sprintf("%x", msg.StartIndex),
		"endInclIndex":   fmt.Sprintf("%x", endInclIndex),
		"proofLen":       proofLen(boundingLeaves),
		"keyValuesCount": len(keyValues),
	})

	stasy.netSender.SendTo(chunk, sender)
}

// proofLen returns the total number of sibling digests across all bounding
// leaves, i.e. the overall size of the proof material attached to a chunk.
// Used for logging only.
func proofLen(boundingLeaves []jmt.BoundingLeaf) int {
	proofLen := 0
	for _, bl := range boundingLeaves {
		proofLen += len(bl.Siblings)
	}
	return proofLen
}

func (stasy *stateSyncState[RI]) messageTreeSyncChunkResponse(msg MessageTreeSyncChunkResponse[RI], sender commontypes.OracleID) {
	msgSeqNr := msg.ToSeqNr
	// Reconstruct the request item this response answers; it must match a
	// pending request tracked by the requester gadget.
	requestedKeyDigestRange := KeyDigestRange{msg.StartIndex, msg.RequestEndInclIndex}
	item := treeSyncChunkRequestItem{
		msgSeqNr,
		requestedKeyDigestRange,
	}
	if !stasy.treeSyncState.treeChunkRequesterGadget.CheckAndMarkResponse(item, sender) {
		stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: check and mark response failed", commontypes.LogFields{
			"sender":   sender,
			"msgSeqNr": msgSeqNr,
		})
		return
	}

	if msg.GoAway {
		stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: go-away", commontypes.LogFields{
"sender": sender, + "msgSeqNr": msgSeqNr, + }) + stasy.treeSyncState.treeChunkRequesterGadget.MarkGoAwayResponse(item, sender) + return + } + + if !(bytes.Compare(msg.EndInclIndex[:], msg.RequestEndInclIndex[:]) <= 0) { + stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: end incl index is out of bounds", commontypes.LogFields{ + "sender": sender, + "msgSeqNr": msgSeqNr, + "requestEndInclIndex": msg.RequestEndInclIndex, + "endInclIndex": msg.EndInclIndex, + }) + stasy.treeSyncState.treeChunkRequesterGadget.MarkBadResponse(item, sender) + return + } + + receivedKeyDigestRange := KeyDigestRange{msg.StartIndex, msg.EndInclIndex} + + stasy.treeSyncState.logger.Debug("received MessageTreeSyncChunkResponse", commontypes.LogFields{ + "sender": sender, + "startIndex": fmt.Sprintf("%x", msg.StartIndex), + "endInclIndex": fmt.Sprintf("%x", msg.EndInclIndex), + "proofLen": proofLen(msg.BoundingLeaves), + "keyValuesCount": len(msg.KeyValues), + }) + + if !stasy.refreshStateSyncState() { + stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: could not refresh state", commontypes.LogFields{ + "sender": sender, + "msgSeqNr": msgSeqNr, + }) + return + } + + // Make sure that we already have the target state root digest + if stasy.treeSyncState.targetStateRootDigest == (StateRootDigest{}) { + stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: we do not have target state root digest, yet", commontypes.LogFields{ + "sender": sender, + "msgSeqNr": msgSeqNr, + }) + return + } + + // We have not sent a tree sync request yet or the response arrived way too late and we have already synced + if stasy.treeSyncState.treeSyncPhase != TreeSyncPhaseActive { + stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: tree-sync is not active", commontypes.LogFields{ + "sender": sender, + "msgSeqNr": msgSeqNr, + "phase": stasy.treeSyncState.treeSyncPhase, + "mode": stasy.syncMode, + }) + return + } + + // Check that target 
seqNrs match
	if stasy.treeSyncState.targetSeqNr != msgSeqNr {
		stasy.treeSyncState.logger.Warn("dropping MessageTreeSyncChunkResponse: message SeqNr does not match expected target", commontypes.LogFields{
			"sender":        sender,
			"msgSeqNr":      msgSeqNr,
			"expectedSeqNr": stasy.treeSyncState.targetSeqNr,
		})
		return
	}

	kvReadWriteTxn, err := stasy.kvDb.NewSerializedReadWriteTransactionUnchecked()
	if err != nil {
		stasy.treeSyncState.logger.Warn("could not create kv read/write transaction", commontypes.LogFields{
			"err": err,
		})
		// Must bail out here: kvReadWriteTxn is nil on error, so the
		// deferred Discard and the VerifyAndWriteTreeSyncChunk call below
		// would panic with a nil pointer dereference.
		return
	}
	defer kvReadWriteTxn.Discard()

	// Verify the chunk against the target state root digest and, if it
	// checks out, write its key-value pairs into the store.
	verifyAndWriteTreeSyncChunkResult, err := kvReadWriteTxn.VerifyAndWriteTreeSyncChunk(
		stasy.treeSyncState.targetStateRootDigest,
		stasy.treeSyncState.targetSeqNr,
		msg.StartIndex,
		msg.EndInclIndex,
		msg.BoundingLeaves,
		msg.KeyValues,
	)

	switch verifyAndWriteTreeSyncChunkResult {
	case VerifyAndWriteTreeSyncChunkResultUnrelatedError:
		stasy.treeSyncState.logger.Warn("failed to apply chunk", commontypes.LogFields{
			"sender": sender,
			"err":    err,
		})
		return
	case VerifyAndWriteTreeSyncChunkResultByzantine:
		// Proof did not verify against the target root: the sender either
		// lied or is broken. Penalize it in the requester gadget.
		stasy.treeSyncState.logger.Warn("byzantine chunk, marking as bad response", commontypes.LogFields{
			"sender":     sender,
			"startIndex": fmt.Sprintf("%x", msg.StartIndex),
			"err":        err,
		})
		stasy.treeSyncState.treeChunkRequesterGadget.MarkBadResponse(item, sender)
		return
	case VerifyAndWriteTreeSyncChunkResultOkComplete:
		stasy.treeSyncState.treeChunkRequesterGadget.MarkGoodResponse(item, sender)

		// Tree sync finished: clear the tree-sync status and align both
		// persisted-seq-nr markers with the synced target, all in the same
		// transaction as the final chunk write.
		if err = kvReadWriteTxn.WriteTreeSyncStatus(TreeSyncStatus{
			TreeSyncPhaseInactive,
			0,
			(StateRootDigest{}),
			PendingKeyDigestRanges{},
		}); err != nil {
			stasy.treeSyncState.logger.Error("failed to write tree-sync status", commontypes.LogFields{
				"sender": sender,
				"err":    err,
			})
			return
		}
		if err = kvReadWriteTxn.WriteLowestPersistedSeqNr(stasy.treeSyncState.targetSeqNr); err != nil {
stasy.treeSyncState.logger.Error("failed to write lowest persisted seq nr", commontypes.LogFields{ + "sender": sender, + "err": err, + }) + return + } + if err = kvReadWriteTxn.WriteHighestCommittedSeqNr(stasy.treeSyncState.targetSeqNr); err != nil { + stasy.treeSyncState.logger.Error("failed to write highest committed sequence number", commontypes.LogFields{ + "sender": sender, + "err": err, + }) + return + } + if err = kvReadWriteTxn.Commit(); err != nil { + stasy.treeSyncState.logger.Error("failed to commit", commontypes.LogFields{ + "sender": sender, + "err": err, + }) + return + } + stasy.treeSyncState.logger.Info("tree synchronization to snapshot completed 🌲", commontypes.LogFields{ + "sender": sender, + "targetSeqNr": stasy.treeSyncState.targetSeqNr, + "rootDigest": fmt.Sprintf("%x", stasy.treeSyncState.targetStateRootDigest), + }) + stasy.treeSyncCompleted() + return + case VerifyAndWriteTreeSyncChunkResultOkNeedMore: + stasy.treeSyncState.treeChunkRequesterGadget.MarkGoodResponse(item, sender) + + updatedPendingKeyDigestRanges := stasy.treeSyncState.pendingKeyDigestRanges.WithReceivedRange(receivedKeyDigestRange) + + if err = kvReadWriteTxn.WriteTreeSyncStatus(TreeSyncStatus{ + stasy.treeSyncState.treeSyncPhase, + stasy.treeSyncState.targetSeqNr, + stasy.treeSyncState.targetStateRootDigest, + updatedPendingKeyDigestRanges, + }); err != nil { + stasy.treeSyncState.logger.Error("failed to write tree-sync status", commontypes.LogFields{ + "sender": sender, + "err": err, + }) + return + } + if err = kvReadWriteTxn.Commit(); err != nil { + stasy.treeSyncState.logger.Error("failed to commit", commontypes.LogFields{ + "sender": sender, + "err": err, + }) + return + } + + stasy.treeSyncState.logger.Debug("applied chunk 🍃", commontypes.LogFields{ + "sender": sender, + "startIndex": fmt.Sprintf("%x", msg.StartIndex), + "endInclIndex": fmt.Sprintf("%x", msg.EndInclIndex), + "keyValuesCount": len(msg.KeyValues), + "pendingKeyDigestRanges": fmt.Sprintf("%x", 
updatedPendingKeyDigestRanges), + }) + + stasy.treeSyncState.pendingKeyDigestRanges = updatedPendingKeyDigestRanges + stasy.treeSyncState.treeChunkRequesterGadget.PleaseRecheckPendingItems() + return + } + panic("unreachable") +} + +func mustTakeSnapshot(seqNr uint64) bool { + return seqNr%SnapshotInterval == 0 +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree_ranges.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree_ranges.go new file mode 100644 index 00000000..73bb0db7 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/protocol/state_sync_tree_ranges.go @@ -0,0 +1,94 @@ +package protocol + +import ( + "bytes" + "fmt" + "slices" + + "github.com/smartcontractkit/libocr/internal/jmt" +) + +// KeyDigestRange represents a contiguous range [StartIndex, EndInclIndex] in +// the key digest space that needs to be fetched during tree synchronization. +type KeyDigestRange struct { + StartIndex jmt.Digest + EndInclIndex jmt.Digest +} + +// splitKeyDigestSpaceEvenly splits the key digest space [0, max] into n even ranges. +// Returns an error if n >= 256 (split granularity would exceed byte precision). 
+func splitKeyDigestSpaceEvenly(n int) ([]KeyDigestRange, error) { + if n < 1 { + return nil, fmt.Errorf("n must be at least 1, got %d", n) + } + if n >= 256 { + return nil, fmt.Errorf("n must be less than 256, got %d", n) + } + + ranges := make([]KeyDigestRange, 0, n) + startIndices := make([]jmt.Digest, 0, n) + + for i := range n { + var startIndex jmt.Digest + startIndex[0] = byte(i * 256 / n) + startIndices = append(startIndices, startIndex) + } + + for i, startIndex := range startIndices { + var endInclIndex jmt.Digest + if i+1 == len(startIndices) { + endInclIndex = jmt.MaxDigest + } else { + var ok bool + endInclIndex, ok = jmt.DecrementDigest(startIndices[i+1]) + if !ok { + return nil, fmt.Errorf("unexpected: could not decrement nonzero digest") + } + } + ranges = append(ranges, KeyDigestRange{startIndex, endInclIndex}) + } + return ranges, nil +} + +// PendingKeyDigestRanges tracks which key digest ranges still need to be +// fetched during tree synchronization. As chunks are received, the +// corresponding ranges are removed or updated. +type PendingKeyDigestRanges struct { + ranges []KeyDigestRange +} + +func NewPendingKeyDigestRanges(ranges []KeyDigestRange) PendingKeyDigestRanges { + return PendingKeyDigestRanges{ranges} +} + +// WithReceivedRange returns a new PendingKeyDigestRanges with the given range +// marked as received. Does not mutate the receiver. 
+func (pkdr PendingKeyDigestRanges) WithReceivedRange(receivedRange KeyDigestRange) PendingKeyDigestRanges { + // Find the range with the startIndex of the received chunk + i := slices.IndexFunc(pkdr.ranges, func(r KeyDigestRange) bool { + return r.StartIndex == receivedRange.StartIndex + }) + if i == -1 { + // Range not found - return unchanged + return pkdr + } + + // Make a copy of the ranges slice to avoid mutating the original + newRanges := slices.Clone(pkdr.ranges) + + nextStartIndex, ok := jmt.IncrementDigest(receivedRange.EndInclIndex) + if !ok || bytes.Compare(receivedRange.EndInclIndex[:], newRanges[i].EndInclIndex[:]) >= 0 { + // The received range covers the entire pending range - remove it + newRanges = slices.Delete(newRanges, i, i+1) + } else { + // The received range covers only part of the pending range - update it + newRanges[i].StartIndex = nextStartIndex + } + + return PendingKeyDigestRanges{newRanges} +} + +// All returns all pending key digest ranges that still need to be fetched. 
+func (pkdr PendingKeyDigestRanges) All() []KeyDigestRange { + return pkdr.ranges +} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/state_tree_synchronization.go b/offchainreporting2plus/internal/ocr3_1/protocol/state_tree_synchronization.go deleted file mode 100644 index 8607f90c..00000000 --- a/offchainreporting2plus/internal/ocr3_1/protocol/state_tree_synchronization.go +++ /dev/null @@ -1,8 +0,0 @@ -package protocol - -type treeSyncState struct{} - -func (state *statePersistenceState[RI]) startTreeSync() { - //TODO implement me - panic("implement me") -} diff --git a/offchainreporting2plus/internal/ocr3_1/protocol/types.go b/offchainreporting2plus/internal/ocr3_1/protocol/types.go index c949c0c9..938a1dc2 100644 --- a/offchainreporting2plus/internal/ocr3_1/protocol/types.go +++ b/offchainreporting2plus/internal/ocr3_1/protocol/types.go @@ -1,28 +1,46 @@ package protocol import ( - "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" + "github.com/smartcontractkit/libocr/internal/jmt" "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) +type KeyValuePair = jmt.KeyValue + +type StateRootDigest = jmt.Digest + type AttestedReportMany[RI any] struct { ReportWithInfo ocr3types.ReportWithInfo[RI] AttributedSignatures []types.AttributedOnchainSignature } -type StateTransitionBlock struct { - Epoch uint64 - BlockSeqNr uint64 - StateTransitionInputsDigest StateTransitionInputsDigest - StateTransitionOutputs StateTransitionOutputs - ReportsPlusPrecursor ocr3_1types.ReportsPlusPrecursor +type KeyValuePairWithDeletions struct { + Key []byte + Value []byte + Deleted bool } -func (stb *StateTransitionBlock) SeqNr() uint64 { - return stb.BlockSeqNr +type StateTransitionOutputs struct { + WriteSet []KeyValuePairWithDeletions } -type StateTransitionOutputs struct { - WriteSet []KeyValuePair +type TreeSyncPhase int + +const ( + // Tree sync was never started, or 
was completed. Regardless, it's not + // happening right now. + TreeSyncPhaseInactive TreeSyncPhase = iota + // Tree sync is waiting for the necessary parts of the key-value store to be + // cleaned up before it can start. + TreeSyncPhaseWaiting + // Tree sync is actively progressing now. + TreeSyncPhaseActive +) + +type TreeSyncStatus struct { + Phase TreeSyncPhase + TargetSeqNr uint64 + TargetStateRootDigest StateRootDigest + PendingKeyDigestRanges PendingKeyDigestRanges } diff --git a/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_db.pb.go b/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_db.pb.go index 27f51d7e..c470175f 100644 --- a/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_db.pb.go +++ b/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_db.pb.go @@ -20,6 +20,183 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type TreeSyncPhase int32 + +const ( + TreeSyncPhase_TREE_SYNC_PHASE_INACTIVE TreeSyncPhase = 0 + TreeSyncPhase_TREE_SYNC_PHASE_WAITING TreeSyncPhase = 1 + TreeSyncPhase_TREE_SYNC_PHASE_ACTIVE TreeSyncPhase = 2 +) + +// Enum value maps for TreeSyncPhase. 
+var ( + TreeSyncPhase_name = map[int32]string{ + 0: "TREE_SYNC_PHASE_INACTIVE", + 1: "TREE_SYNC_PHASE_WAITING", + 2: "TREE_SYNC_PHASE_ACTIVE", + } + TreeSyncPhase_value = map[string]int32{ + "TREE_SYNC_PHASE_INACTIVE": 0, + "TREE_SYNC_PHASE_WAITING": 1, + "TREE_SYNC_PHASE_ACTIVE": 2, + } +) + +func (x TreeSyncPhase) Enum() *TreeSyncPhase { + p := new(TreeSyncPhase) + *p = x + return p +} + +func (x TreeSyncPhase) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TreeSyncPhase) Descriptor() protoreflect.EnumDescriptor { + return file_offchainreporting3_1_db_proto_enumTypes[0].Descriptor() +} + +func (TreeSyncPhase) Type() protoreflect.EnumType { + return &file_offchainreporting3_1_db_proto_enumTypes[0] +} + +func (x TreeSyncPhase) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TreeSyncPhase.Descriptor instead. +func (TreeSyncPhase) EnumDescriptor() ([]byte, []int) { + return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{0} +} + +// KeyDigestRange represents a contiguous range [start_index, end_incl_index] +// in the key digest space. 
+type KeyDigestRange struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartIndex []byte `protobuf:"bytes,1,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"` + EndInclIndex []byte `protobuf:"bytes,2,opt,name=end_incl_index,json=endInclIndex,proto3" json:"end_incl_index,omitempty"` +} + +func (x *KeyDigestRange) Reset() { + *x = KeyDigestRange{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_db_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyDigestRange) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyDigestRange) ProtoMessage() {} + +func (x *KeyDigestRange) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_db_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyDigestRange.ProtoReflect.Descriptor instead. 
+func (*KeyDigestRange) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{0} +} + +func (x *KeyDigestRange) GetStartIndex() []byte { + if x != nil { + return x.StartIndex + } + return nil +} + +func (x *KeyDigestRange) GetEndInclIndex() []byte { + if x != nil { + return x.EndInclIndex + } + return nil +} + +type TreeSyncStatus struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Phase TreeSyncPhase `protobuf:"varint,1,opt,name=phase,proto3,enum=offchainreporting3_1.TreeSyncPhase" json:"phase,omitempty"` + TargetSeqNr uint64 `protobuf:"varint,2,opt,name=target_seq_nr,json=targetSeqNr,proto3" json:"target_seq_nr,omitempty"` + TargetStateRootDigest []byte `protobuf:"bytes,3,opt,name=target_state_root_digest,json=targetStateRootDigest,proto3" json:"target_state_root_digest,omitempty"` + PendingKeyDigestRanges []*KeyDigestRange `protobuf:"bytes,4,rep,name=pending_key_digest_ranges,json=pendingKeyDigestRanges,proto3" json:"pending_key_digest_ranges,omitempty"` +} + +func (x *TreeSyncStatus) Reset() { + *x = TreeSyncStatus{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_db_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TreeSyncStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TreeSyncStatus) ProtoMessage() {} + +func (x *TreeSyncStatus) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_db_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TreeSyncStatus.ProtoReflect.Descriptor instead. 
+func (*TreeSyncStatus) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{1} +} + +func (x *TreeSyncStatus) GetPhase() TreeSyncPhase { + if x != nil { + return x.Phase + } + return TreeSyncPhase_TREE_SYNC_PHASE_INACTIVE +} + +func (x *TreeSyncStatus) GetTargetSeqNr() uint64 { + if x != nil { + return x.TargetSeqNr + } + return 0 +} + +func (x *TreeSyncStatus) GetTargetStateRootDigest() []byte { + if x != nil { + return x.TargetStateRootDigest + } + return nil +} + +func (x *TreeSyncStatus) GetPendingKeyDigestRanges() []*KeyDigestRange { + if x != nil { + return x.PendingKeyDigestRanges + } + return nil +} + type PacemakerState struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -32,7 +209,7 @@ type PacemakerState struct { func (x *PacemakerState) Reset() { *x = PacemakerState{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_db_proto_msgTypes[0] + mi := &file_offchainreporting3_1_db_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -45,7 +222,7 @@ func (x *PacemakerState) String() string { func (*PacemakerState) ProtoMessage() {} func (x *PacemakerState) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_db_proto_msgTypes[0] + mi := &file_offchainreporting3_1_db_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -58,7 +235,7 @@ func (x *PacemakerState) ProtoReflect() protoreflect.Message { // Deprecated: Use PacemakerState.ProtoReflect.Descriptor instead. 
func (*PacemakerState) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{0} + return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{2} } func (x *PacemakerState) GetEpoch() uint64 { @@ -75,31 +252,33 @@ func (x *PacemakerState) GetHighestSentNewEpochWish() uint64 { return 0 } -type StatePersistenceState struct { +type BlobMeta struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - HighestPersistedStateTransitionBlockSeqNr uint64 `protobuf:"varint,1,opt,name=highest_persisted_state_transition_block_seq_nr,json=highestPersistedStateTransitionBlockSeqNr,proto3" json:"highest_persisted_state_transition_block_seq_nr,omitempty"` + PayloadLength uint64 `protobuf:"varint,1,opt,name=payload_length,json=payloadLength,proto3" json:"payload_length,omitempty"` + ChunkHaves []bool `protobuf:"varint,2,rep,packed,name=chunk_haves,json=chunkHaves,proto3" json:"chunk_haves,omitempty"` + ExpirySeqNr uint64 `protobuf:"varint,3,opt,name=expiry_seq_nr,json=expirySeqNr,proto3" json:"expiry_seq_nr,omitempty"` } -func (x *StatePersistenceState) Reset() { - *x = StatePersistenceState{} +func (x *BlobMeta) Reset() { + *x = BlobMeta{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_db_proto_msgTypes[1] + mi := &file_offchainreporting3_1_db_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *StatePersistenceState) String() string { +func (x *BlobMeta) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StatePersistenceState) ProtoMessage() {} +func (*BlobMeta) ProtoMessage() {} -func (x *StatePersistenceState) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_db_proto_msgTypes[1] +func (x *BlobMeta) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_db_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -110,14 +289,28 @@ func (x *StatePersistenceState) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StatePersistenceState.ProtoReflect.Descriptor instead. -func (*StatePersistenceState) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{1} +// Deprecated: Use BlobMeta.ProtoReflect.Descriptor instead. +func (*BlobMeta) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_db_proto_rawDescGZIP(), []int{3} +} + +func (x *BlobMeta) GetPayloadLength() uint64 { + if x != nil { + return x.PayloadLength + } + return 0 +} + +func (x *BlobMeta) GetChunkHaves() []bool { + if x != nil { + return x.ChunkHaves + } + return nil } -func (x *StatePersistenceState) GetHighestPersistedStateTransitionBlockSeqNr() uint64 { +func (x *BlobMeta) GetExpirySeqNr() uint64 { if x != nil { - return x.HighestPersistedStateTransitionBlockSeqNr + return x.ExpirySeqNr } return 0 } @@ -128,23 +321,51 @@ var file_offchainreporting3_1_db_proto_rawDesc = []byte{ 0x0a, 0x1d, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x5f, 0x64, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x22, 0x64, 0x0a, 0x0e, 0x50, 0x61, 0x63, 0x65, 0x6d, 0x61, 0x6b, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x3c, 0x0a, - 0x1b, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x65, - 0x77, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x77, 0x69, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x17, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x53, 0x65, 0x6e, 0x74, 0x4e, - 0x65, 
0x77, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x57, 0x69, 0x73, 0x68, 0x22, 0x7b, 0x0a, 0x15, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x62, 0x0a, 0x2f, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x29, 0x68, - 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x3b, 0x73, 0x65, - 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x22, 0x57, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x6e, 0x64, 0x5f, + 0x69, 0x6e, 0x63, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x63, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x89, + 0x02, 0x0a, 0x0e, 0x54, 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x39, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x23, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x54, 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, + 0x50, 0x68, 0x61, 0x73, 
0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x65, 0x71, 0x4e, 0x72, + 0x12, 0x37, 0x0a, 0x18, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x15, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x5f, 0x0a, 0x19, 0x70, 0x65, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x5f, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, + 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x33, 0x5f, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x16, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x44, 0x69, + 0x67, 0x65, 0x73, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0e, 0x50, 0x61, + 0x63, 0x65, 0x6d, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x12, 0x3c, 0x0a, 0x1b, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x65, + 0x6e, 0x74, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x77, 0x69, 0x73, + 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, + 0x53, 0x65, 0x6e, 0x74, 0x4e, 0x65, 0x77, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x57, 0x69, 0x73, 0x68, + 0x22, 0x76, 0x0a, 0x08, 0x42, 0x6c, 0x6f, 0x62, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6c, 0x65, 
0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x68, 0x61, 0x76, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x48, + 0x61, 0x76, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x73, + 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x79, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x2a, 0x66, 0x0a, 0x0d, 0x54, 0x72, 0x65, 0x65, + 0x53, 0x79, 0x6e, 0x63, 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x54, 0x52, 0x45, + 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x50, 0x48, 0x41, 0x53, 0x45, 0x5f, 0x49, 0x4e, 0x41, + 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x52, 0x45, 0x45, 0x5f, + 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x50, 0x48, 0x41, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x49, 0x54, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x52, 0x45, 0x45, 0x5f, 0x53, 0x59, 0x4e, + 0x43, 0x5f, 0x50, 0x48, 0x41, 0x53, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, + 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x3b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -159,17 +380,23 @@ func file_offchainreporting3_1_db_proto_rawDescGZIP() []byte { return file_offchainreporting3_1_db_proto_rawDescData } -var file_offchainreporting3_1_db_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_offchainreporting3_1_db_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_offchainreporting3_1_db_proto_msgTypes = make([]protoimpl.MessageInfo, 4) var file_offchainreporting3_1_db_proto_goTypes = []interface{}{ - (*PacemakerState)(nil), // 0: offchainreporting3_1.PacemakerState - (*StatePersistenceState)(nil), // 1: offchainreporting3_1.StatePersistenceState + 
(TreeSyncPhase)(0), // 0: offchainreporting3_1.TreeSyncPhase + (*KeyDigestRange)(nil), // 1: offchainreporting3_1.KeyDigestRange + (*TreeSyncStatus)(nil), // 2: offchainreporting3_1.TreeSyncStatus + (*PacemakerState)(nil), // 3: offchainreporting3_1.PacemakerState + (*BlobMeta)(nil), // 4: offchainreporting3_1.BlobMeta } var file_offchainreporting3_1_db_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: offchainreporting3_1.TreeSyncStatus.phase:type_name -> offchainreporting3_1.TreeSyncPhase + 1, // 1: offchainreporting3_1.TreeSyncStatus.pending_key_digest_ranges:type_name -> offchainreporting3_1.KeyDigestRange + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_offchainreporting3_1_db_proto_init() } @@ -179,7 +406,7 @@ func file_offchainreporting3_1_db_proto_init() { } if !protoimpl.UnsafeEnabled { file_offchainreporting3_1_db_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PacemakerState); i { + switch v := v.(*KeyDigestRange); i { case 0: return &v.state case 1: @@ -191,7 +418,31 @@ func file_offchainreporting3_1_db_proto_init() { } } file_offchainreporting3_1_db_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatePersistenceState); i { + switch v := v.(*TreeSyncStatus); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_db_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*PacemakerState); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_db_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlobMeta); i { case 0: return &v.state case 1: @@ -208,13 +459,14 @@ func file_offchainreporting3_1_db_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_offchainreporting3_1_db_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, + NumEnums: 1, + NumMessages: 4, NumExtensions: 0, NumServices: 0, }, GoTypes: file_offchainreporting3_1_db_proto_goTypes, DependencyIndexes: file_offchainreporting3_1_db_proto_depIdxs, + EnumInfos: file_offchainreporting3_1_db_proto_enumTypes, MessageInfos: file_offchainreporting3_1_db_proto_msgTypes, }.Build() File_offchainreporting3_1_db_proto = out.File diff --git a/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_jmt.pb.go b/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_jmt.pb.go new file mode 100644 index 00000000..0039a4b8 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_jmt.pb.go @@ -0,0 +1,444 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.31.0 +// protoc v4.25.1 +// source: offchainreporting3_1_jmt.proto + +package serialization + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Node struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Node: + // + // *Node_InternalNode + // *Node_LeafNode + Node isNode_Node `protobuf_oneof:"node"` +} + +func (x *Node) Reset() { + *x = Node{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Node) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Node) ProtoMessage() {} + +func (x *Node) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Node.ProtoReflect.Descriptor instead. 
+func (*Node) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_jmt_proto_rawDescGZIP(), []int{0} +} + +func (m *Node) GetNode() isNode_Node { + if m != nil { + return m.Node + } + return nil +} + +func (x *Node) GetInternalNode() *InternalNode { + if x, ok := x.GetNode().(*Node_InternalNode); ok { + return x.InternalNode + } + return nil +} + +func (x *Node) GetLeafNode() *LeafNode { + if x, ok := x.GetNode().(*Node_LeafNode); ok { + return x.LeafNode + } + return nil +} + +type isNode_Node interface { + isNode_Node() +} + +type Node_InternalNode struct { + InternalNode *InternalNode `protobuf:"bytes,1,opt,name=internal_node,json=internalNode,proto3,oneof"` +} + +type Node_LeafNode struct { + LeafNode *LeafNode `protobuf:"bytes,2,opt,name=leaf_node,json=leafNode,proto3,oneof"` +} + +func (*Node_InternalNode) isNode_Node() {} + +func (*Node_LeafNode) isNode_Node() {} + +type InternalNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Maps nibbles to children. The nibble for children[i] is the index of the + // i-th 1 bit from LSB. + ChildrenBitmap uint32 `protobuf:"varint,1,opt,name=children_bitmap,json=childrenBitmap,proto3" json:"children_bitmap,omitempty"` + // Only non-nil children are included in this array, in nibble order. 
+ Children []*InternalNodeChild `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty"` +} + +func (x *InternalNode) Reset() { + *x = InternalNode{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalNode) ProtoMessage() {} + +func (x *InternalNode) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalNode.ProtoReflect.Descriptor instead. +func (*InternalNode) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_jmt_proto_rawDescGZIP(), []int{1} +} + +func (x *InternalNode) GetChildrenBitmap() uint32 { + if x != nil { + return x.ChildrenBitmap + } + return 0 +} + +func (x *InternalNode) GetChildren() []*InternalNodeChild { + if x != nil { + return x.Children + } + return nil +} + +type InternalNodeChild struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Digest []byte `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + IsLeaf bool `protobuf:"varint,3,opt,name=is_leaf,json=isLeaf,proto3" json:"is_leaf,omitempty"` +} + +func (x *InternalNodeChild) Reset() { + *x = InternalNodeChild{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InternalNodeChild) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*InternalNodeChild) ProtoMessage() {} + +func (x *InternalNodeChild) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalNodeChild.ProtoReflect.Descriptor instead. +func (*InternalNodeChild) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_jmt_proto_rawDescGZIP(), []int{2} +} + +func (x *InternalNodeChild) GetVersion() uint64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *InternalNodeChild) GetDigest() []byte { + if x != nil { + return x.Digest + } + return nil +} + +func (x *InternalNodeChild) GetIsLeaf() bool { + if x != nil { + return x.IsLeaf + } + return false +} + +type LeafNode struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyDigest []byte `protobuf:"bytes,1,opt,name=key_digest,json=keyDigest,proto3" json:"key_digest,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + ValueDigest []byte `protobuf:"bytes,3,opt,name=value_digest,json=valueDigest,proto3" json:"value_digest,omitempty"` + Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *LeafNode) Reset() { + *x = LeafNode{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeafNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeafNode) ProtoMessage() {} + +func (x *LeafNode) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_jmt_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeafNode.ProtoReflect.Descriptor instead. +func (*LeafNode) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_jmt_proto_rawDescGZIP(), []int{3} +} + +func (x *LeafNode) GetKeyDigest() []byte { + if x != nil { + return x.KeyDigest + } + return nil +} + +func (x *LeafNode) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *LeafNode) GetValueDigest() []byte { + if x != nil { + return x.ValueDigest + } + return nil +} + +func (x *LeafNode) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_offchainreporting3_1_jmt_proto protoreflect.FileDescriptor + +var file_offchainreporting3_1_jmt_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x5f, 0x6a, 0x6d, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x14, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x22, 0x98, 0x01, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, + 0x49, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x6e, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x49, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x69, 0x6e, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x3d, 0x0a, 0x09, 0x6c, 0x65, + 0x61, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, + 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4c, 0x65, 0x61, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x48, 0x00, 0x52, + 
0x08, 0x6c, 0x65, 0x61, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6e, 0x6f, 0x64, + 0x65, 0x22, 0x7c, 0x0a, 0x0c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4e, 0x6f, 0x64, + 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x62, 0x69, + 0x74, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x63, 0x68, 0x69, 0x6c, + 0x64, 0x72, 0x65, 0x6e, 0x42, 0x69, 0x74, 0x6d, 0x61, 0x70, 0x12, 0x43, 0x0a, 0x08, 0x63, 0x68, + 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, + 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x33, 0x5f, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, + 0x43, 0x68, 0x69, 0x6c, 0x64, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x22, + 0x5e, 0x0a, 0x11, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x43, + 0x68, 0x69, 0x6c, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x65, 0x61, + 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x65, 0x61, 0x66, 0x22, + 0x74, 0x0a, 0x08, 0x4c, 0x65, 0x61, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6b, + 0x65, 0x79, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x09, 0x6b, 0x65, 0x79, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x21, 0x0a, 0x0c, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x76, 0x61, 
0x6c, 0x75, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x3b, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_offchainreporting3_1_jmt_proto_rawDescOnce sync.Once + file_offchainreporting3_1_jmt_proto_rawDescData = file_offchainreporting3_1_jmt_proto_rawDesc +) + +func file_offchainreporting3_1_jmt_proto_rawDescGZIP() []byte { + file_offchainreporting3_1_jmt_proto_rawDescOnce.Do(func() { + file_offchainreporting3_1_jmt_proto_rawDescData = protoimpl.X.CompressGZIP(file_offchainreporting3_1_jmt_proto_rawDescData) + }) + return file_offchainreporting3_1_jmt_proto_rawDescData +} + +var file_offchainreporting3_1_jmt_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_offchainreporting3_1_jmt_proto_goTypes = []interface{}{ + (*Node)(nil), // 0: offchainreporting3_1.Node + (*InternalNode)(nil), // 1: offchainreporting3_1.InternalNode + (*InternalNodeChild)(nil), // 2: offchainreporting3_1.InternalNodeChild + (*LeafNode)(nil), // 3: offchainreporting3_1.LeafNode +} +var file_offchainreporting3_1_jmt_proto_depIdxs = []int32{ + 1, // 0: offchainreporting3_1.Node.internal_node:type_name -> offchainreporting3_1.InternalNode + 3, // 1: offchainreporting3_1.Node.leaf_node:type_name -> offchainreporting3_1.LeafNode + 2, // 2: offchainreporting3_1.InternalNode.children:type_name -> offchainreporting3_1.InternalNodeChild + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_offchainreporting3_1_jmt_proto_init() } +func file_offchainreporting3_1_jmt_proto_init() { + if 
File_offchainreporting3_1_jmt_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_offchainreporting3_1_jmt_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Node); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_jmt_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InternalNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_jmt_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InternalNodeChild); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_jmt_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LeafNode); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_offchainreporting3_1_jmt_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*Node_InternalNode)(nil), + (*Node_LeafNode)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_offchainreporting3_1_jmt_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_offchainreporting3_1_jmt_proto_goTypes, + DependencyIndexes: file_offchainreporting3_1_jmt_proto_depIdxs, + MessageInfos: file_offchainreporting3_1_jmt_proto_msgTypes, + }.Build() + File_offchainreporting3_1_jmt_proto = out.File + file_offchainreporting3_1_jmt_proto_rawDesc = nil + file_offchainreporting3_1_jmt_proto_goTypes = nil + file_offchainreporting3_1_jmt_proto_depIdxs = nil +} diff --git 
a/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_messages.pb.go b/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_messages.pb.go index 4493cd5d..60238629 100644 --- a/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_messages.pb.go +++ b/offchainreporting2plus/internal/ocr3_1/serialization/offchainreporting3_1_messages.pb.go @@ -39,12 +39,14 @@ type MessageWrapper struct { // *MessageWrapper_MessageCertifiedCommitRequest // *MessageWrapper_MessageCertifiedCommit // *MessageWrapper_MessageBlockSyncRequest - // *MessageWrapper_MessageBlockSync - // *MessageWrapper_MessageBlockSyncSummary + // *MessageWrapper_MessageBlockSyncResponse + // *MessageWrapper_MessageStateSyncSummary + // *MessageWrapper_MessageTreeSyncChunkRequest + // *MessageWrapper_MessageTreeSyncChunkResponse // *MessageWrapper_MessageBlobOffer + // *MessageWrapper_MessageBlobOfferResponse // *MessageWrapper_MessageBlobChunkRequest // *MessageWrapper_MessageBlobChunkResponse - // *MessageWrapper_MessageBlobAvailable Msg isMessageWrapper_Msg `protobuf_oneof:"msg"` } @@ -171,16 +173,30 @@ func (x *MessageWrapper) GetMessageBlockSyncRequest() *MessageBlockSyncRequest { return nil } -func (x *MessageWrapper) GetMessageBlockSync() *MessageBlockSync { - if x, ok := x.GetMsg().(*MessageWrapper_MessageBlockSync); ok { - return x.MessageBlockSync +func (x *MessageWrapper) GetMessageBlockSyncResponse() *MessageBlockSyncResponse { + if x, ok := x.GetMsg().(*MessageWrapper_MessageBlockSyncResponse); ok { + return x.MessageBlockSyncResponse } return nil } -func (x *MessageWrapper) GetMessageBlockSyncSummary() *MessageBlockSyncSummary { - if x, ok := x.GetMsg().(*MessageWrapper_MessageBlockSyncSummary); ok { - return x.MessageBlockSyncSummary +func (x *MessageWrapper) GetMessageStateSyncSummary() *MessageStateSyncSummary { + if x, ok := x.GetMsg().(*MessageWrapper_MessageStateSyncSummary); ok { + return x.MessageStateSyncSummary + } + 
return nil +} + +func (x *MessageWrapper) GetMessageTreeSyncChunkRequest() *MessageTreeSyncChunkRequest { + if x, ok := x.GetMsg().(*MessageWrapper_MessageTreeSyncChunkRequest); ok { + return x.MessageTreeSyncChunkRequest + } + return nil +} + +func (x *MessageWrapper) GetMessageTreeSyncChunkResponse() *MessageTreeSyncChunkResponse { + if x, ok := x.GetMsg().(*MessageWrapper_MessageTreeSyncChunkResponse); ok { + return x.MessageTreeSyncChunkResponse } return nil } @@ -192,6 +208,13 @@ func (x *MessageWrapper) GetMessageBlobOffer() *MessageBlobOffer { return nil } +func (x *MessageWrapper) GetMessageBlobOfferResponse() *MessageBlobOfferResponse { + if x, ok := x.GetMsg().(*MessageWrapper_MessageBlobOfferResponse); ok { + return x.MessageBlobOfferResponse + } + return nil +} + func (x *MessageWrapper) GetMessageBlobChunkRequest() *MessageBlobChunkRequest { if x, ok := x.GetMsg().(*MessageWrapper_MessageBlobChunkRequest); ok { return x.MessageBlobChunkRequest @@ -206,13 +229,6 @@ func (x *MessageWrapper) GetMessageBlobChunkResponse() *MessageBlobChunkResponse return nil } -func (x *MessageWrapper) GetMessageBlobAvailable() *MessageBlobAvailable { - if x, ok := x.GetMsg().(*MessageWrapper_MessageBlobAvailable); ok { - return x.MessageBlobAvailable - } - return nil -} - type isMessageWrapper_Msg interface { isMessageWrapper_Msg() } @@ -265,28 +281,36 @@ type MessageWrapper_MessageBlockSyncRequest struct { MessageBlockSyncRequest *MessageBlockSyncRequest `protobuf:"bytes,28,opt,name=message_block_sync_request,json=messageBlockSyncRequest,proto3,oneof"` } -type MessageWrapper_MessageBlockSync struct { - MessageBlockSync *MessageBlockSync `protobuf:"bytes,29,opt,name=message_block_sync,json=messageBlockSync,proto3,oneof"` +type MessageWrapper_MessageBlockSyncResponse struct { + MessageBlockSyncResponse *MessageBlockSyncResponse `protobuf:"bytes,29,opt,name=message_block_sync_response,json=messageBlockSyncResponse,proto3,oneof"` +} + +type 
MessageWrapper_MessageStateSyncSummary struct { + MessageStateSyncSummary *MessageStateSyncSummary `protobuf:"bytes,30,opt,name=message_state_sync_summary,json=messageStateSyncSummary,proto3,oneof"` } -type MessageWrapper_MessageBlockSyncSummary struct { - MessageBlockSyncSummary *MessageBlockSyncSummary `protobuf:"bytes,30,opt,name=message_block_sync_summary,json=messageBlockSyncSummary,proto3,oneof"` +type MessageWrapper_MessageTreeSyncChunkRequest struct { + MessageTreeSyncChunkRequest *MessageTreeSyncChunkRequest `protobuf:"bytes,31,opt,name=message_tree_sync_chunk_request,json=messageTreeSyncChunkRequest,proto3,oneof"` +} + +type MessageWrapper_MessageTreeSyncChunkResponse struct { + MessageTreeSyncChunkResponse *MessageTreeSyncChunkResponse `protobuf:"bytes,32,opt,name=message_tree_sync_chunk_response,json=messageTreeSyncChunkResponse,proto3,oneof"` } type MessageWrapper_MessageBlobOffer struct { - MessageBlobOffer *MessageBlobOffer `protobuf:"bytes,31,opt,name=message_blob_offer,json=messageBlobOffer,proto3,oneof"` + MessageBlobOffer *MessageBlobOffer `protobuf:"bytes,33,opt,name=message_blob_offer,json=messageBlobOffer,proto3,oneof"` } -type MessageWrapper_MessageBlobChunkRequest struct { - MessageBlobChunkRequest *MessageBlobChunkRequest `protobuf:"bytes,32,opt,name=message_blob_chunk_request,json=messageBlobChunkRequest,proto3,oneof"` +type MessageWrapper_MessageBlobOfferResponse struct { + MessageBlobOfferResponse *MessageBlobOfferResponse `protobuf:"bytes,34,opt,name=message_blob_offer_response,json=messageBlobOfferResponse,proto3,oneof"` } -type MessageWrapper_MessageBlobChunkResponse struct { - MessageBlobChunkResponse *MessageBlobChunkResponse `protobuf:"bytes,33,opt,name=message_blob_chunk_response,json=messageBlobChunkResponse,proto3,oneof"` +type MessageWrapper_MessageBlobChunkRequest struct { + MessageBlobChunkRequest *MessageBlobChunkRequest `protobuf:"bytes,35,opt,name=message_blob_chunk_request,json=messageBlobChunkRequest,proto3,oneof"` } 
-type MessageWrapper_MessageBlobAvailable struct { - MessageBlobAvailable *MessageBlobAvailable `protobuf:"bytes,34,opt,name=message_blob_available,json=messageBlobAvailable,proto3,oneof"` +type MessageWrapper_MessageBlobChunkResponse struct { + MessageBlobChunkResponse *MessageBlobChunkResponse `protobuf:"bytes,36,opt,name=message_blob_chunk_response,json=messageBlobChunkResponse,proto3,oneof"` } func (*MessageWrapper_MessageNewEpochWish) isMessageWrapper_Msg() {} @@ -313,18 +337,22 @@ func (*MessageWrapper_MessageCertifiedCommit) isMessageWrapper_Msg() {} func (*MessageWrapper_MessageBlockSyncRequest) isMessageWrapper_Msg() {} -func (*MessageWrapper_MessageBlockSync) isMessageWrapper_Msg() {} +func (*MessageWrapper_MessageBlockSyncResponse) isMessageWrapper_Msg() {} + +func (*MessageWrapper_MessageStateSyncSummary) isMessageWrapper_Msg() {} + +func (*MessageWrapper_MessageTreeSyncChunkRequest) isMessageWrapper_Msg() {} -func (*MessageWrapper_MessageBlockSyncSummary) isMessageWrapper_Msg() {} +func (*MessageWrapper_MessageTreeSyncChunkResponse) isMessageWrapper_Msg() {} func (*MessageWrapper_MessageBlobOffer) isMessageWrapper_Msg() {} +func (*MessageWrapper_MessageBlobOfferResponse) isMessageWrapper_Msg() {} + func (*MessageWrapper_MessageBlobChunkRequest) isMessageWrapper_Msg() {} func (*MessageWrapper_MessageBlobChunkResponse) isMessageWrapper_Msg() {} -func (*MessageWrapper_MessageBlobAvailable) isMessageWrapper_Msg() {} - type MessageNewEpochWish struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -959,8 +987,8 @@ type MessageBlockSyncRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - HighestCommittedSeqNr uint64 `protobuf:"varint,1,opt,name=highest_committed_seq_nr,json=highestCommittedSeqNr,proto3" json:"highest_committed_seq_nr,omitempty"` - Nonce uint64 `protobuf:"varint,2,opt,name=nonce,proto3" json:"nonce,omitempty"` + StartSeqNr uint64 
`protobuf:"varint,1,opt,name=start_seq_nr,json=startSeqNr,proto3" json:"start_seq_nr,omitempty"` + EndExclSeqNr uint64 `protobuf:"varint,2,opt,name=end_excl_seq_nr,json=endExclSeqNr,proto3" json:"end_excl_seq_nr,omitempty"` } func (x *MessageBlockSyncRequest) Reset() { @@ -995,31 +1023,32 @@ func (*MessageBlockSyncRequest) Descriptor() ([]byte, []int) { return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{12} } -func (x *MessageBlockSyncRequest) GetHighestCommittedSeqNr() uint64 { +func (x *MessageBlockSyncRequest) GetStartSeqNr() uint64 { if x != nil { - return x.HighestCommittedSeqNr + return x.StartSeqNr } return 0 } -func (x *MessageBlockSyncRequest) GetNonce() uint64 { +func (x *MessageBlockSyncRequest) GetEndExclSeqNr() uint64 { if x != nil { - return x.Nonce + return x.EndExclSeqNr } return 0 } -type MessageBlockSync struct { +type MessageBlockSyncResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AttestedStateTransitionBlocks []*AttestedStateTransitionBlock `protobuf:"bytes,1,rep,name=attested_state_transition_blocks,json=attestedStateTransitionBlocks,proto3" json:"attested_state_transition_blocks,omitempty"` - Nonce uint64 `protobuf:"varint,2,opt,name=nonce,proto3" json:"nonce,omitempty"` + RequestStartSeqNr uint64 `protobuf:"varint,1,opt,name=request_start_seq_nr,json=requestStartSeqNr,proto3" json:"request_start_seq_nr,omitempty"` + RequestEndExclSeqNr uint64 `protobuf:"varint,2,opt,name=request_end_excl_seq_nr,json=requestEndExclSeqNr,proto3" json:"request_end_excl_seq_nr,omitempty"` + AttestedStateTransitionBlocks []*AttestedStateTransitionBlock `protobuf:"bytes,3,rep,name=attested_state_transition_blocks,json=attestedStateTransitionBlocks,proto3" json:"attested_state_transition_blocks,omitempty"` } -func (x *MessageBlockSync) Reset() { - *x = MessageBlockSync{} +func (x *MessageBlockSyncResponse) Reset() { + *x = MessageBlockSyncResponse{} if protoimpl.UnsafeEnabled { mi 
:= &file_offchainreporting3_1_messages_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1027,13 +1056,13 @@ func (x *MessageBlockSync) Reset() { } } -func (x *MessageBlockSync) String() string { +func (x *MessageBlockSyncResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*MessageBlockSync) ProtoMessage() {} +func (*MessageBlockSyncResponse) ProtoMessage() {} -func (x *MessageBlockSync) ProtoReflect() protoreflect.Message { +func (x *MessageBlockSyncResponse) ProtoReflect() protoreflect.Message { mi := &file_offchainreporting3_1_messages_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1045,35 +1074,43 @@ func (x *MessageBlockSync) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use MessageBlockSync.ProtoReflect.Descriptor instead. -func (*MessageBlockSync) Descriptor() ([]byte, []int) { +// Deprecated: Use MessageBlockSyncResponse.ProtoReflect.Descriptor instead. 
+func (*MessageBlockSyncResponse) Descriptor() ([]byte, []int) { return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{13} } -func (x *MessageBlockSync) GetAttestedStateTransitionBlocks() []*AttestedStateTransitionBlock { +func (x *MessageBlockSyncResponse) GetRequestStartSeqNr() uint64 { if x != nil { - return x.AttestedStateTransitionBlocks + return x.RequestStartSeqNr } - return nil + return 0 } -func (x *MessageBlockSync) GetNonce() uint64 { +func (x *MessageBlockSyncResponse) GetRequestEndExclSeqNr() uint64 { if x != nil { - return x.Nonce + return x.RequestEndExclSeqNr } return 0 } -type MessageBlockSyncSummary struct { +func (x *MessageBlockSyncResponse) GetAttestedStateTransitionBlocks() []*AttestedStateTransitionBlock { + if x != nil { + return x.AttestedStateTransitionBlocks + } + return nil +} + +type MessageStateSyncSummary struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - LowestPersistedSeqNr uint64 `protobuf:"varint,1,opt,name=lowest_persisted_seq_nr,json=lowestPersistedSeqNr,proto3" json:"lowest_persisted_seq_nr,omitempty"` + LowestPersistedSeqNr uint64 `protobuf:"varint,1,opt,name=lowest_persisted_seq_nr,json=lowestPersistedSeqNr,proto3" json:"lowest_persisted_seq_nr,omitempty"` + HighestCommittedSeqNr uint64 `protobuf:"varint,2,opt,name=highest_committed_seq_nr,json=highestCommittedSeqNr,proto3" json:"highest_committed_seq_nr,omitempty"` } -func (x *MessageBlockSyncSummary) Reset() { - *x = MessageBlockSyncSummary{} +func (x *MessageStateSyncSummary) Reset() { + *x = MessageStateSyncSummary{} if protoimpl.UnsafeEnabled { mi := &file_offchainreporting3_1_messages_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1081,13 +1118,13 @@ func (x *MessageBlockSyncSummary) Reset() { } } -func (x *MessageBlockSyncSummary) String() string { +func (x *MessageStateSyncSummary) String() string { return protoimpl.X.MessageStringOf(x) } -func 
(*MessageBlockSyncSummary) ProtoMessage() {} +func (*MessageStateSyncSummary) ProtoMessage() {} -func (x *MessageBlockSyncSummary) ProtoReflect() protoreflect.Message { +func (x *MessageStateSyncSummary) ProtoReflect() protoreflect.Message { mi := &file_offchainreporting3_1_messages_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1099,18 +1136,348 @@ func (x *MessageBlockSyncSummary) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use MessageBlockSyncSummary.ProtoReflect.Descriptor instead. -func (*MessageBlockSyncSummary) Descriptor() ([]byte, []int) { +// Deprecated: Use MessageStateSyncSummary.ProtoReflect.Descriptor instead. +func (*MessageStateSyncSummary) Descriptor() ([]byte, []int) { return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{14} } -func (x *MessageBlockSyncSummary) GetLowestPersistedSeqNr() uint64 { +func (x *MessageStateSyncSummary) GetLowestPersistedSeqNr() uint64 { if x != nil { return x.LowestPersistedSeqNr } return 0 } +func (x *MessageStateSyncSummary) GetHighestCommittedSeqNr() uint64 { + if x != nil { + return x.HighestCommittedSeqNr + } + return 0 +} + +type MessageTreeSyncChunkRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ToSeqNr uint64 `protobuf:"varint,1,opt,name=to_seq_nr,json=toSeqNr,proto3" json:"to_seq_nr,omitempty"` + StartIndex []byte `protobuf:"bytes,2,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"` + EndInclIndex []byte `protobuf:"bytes,3,opt,name=end_incl_index,json=endInclIndex,proto3" json:"end_incl_index,omitempty"` +} + +func (x *MessageTreeSyncChunkRequest) Reset() { + *x = MessageTreeSyncChunkRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*MessageTreeSyncChunkRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTreeSyncChunkRequest) ProtoMessage() {} + +func (x *MessageTreeSyncChunkRequest) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTreeSyncChunkRequest.ProtoReflect.Descriptor instead. +func (*MessageTreeSyncChunkRequest) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{15} +} + +func (x *MessageTreeSyncChunkRequest) GetToSeqNr() uint64 { + if x != nil { + return x.ToSeqNr + } + return 0 +} + +func (x *MessageTreeSyncChunkRequest) GetStartIndex() []byte { + if x != nil { + return x.StartIndex + } + return nil +} + +func (x *MessageTreeSyncChunkRequest) GetEndInclIndex() []byte { + if x != nil { + return x.EndInclIndex + } + return nil +} + +type KeyValuePair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValuePair) Reset() { + *x = KeyValuePair{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValuePair) ProtoMessage() {} + +func (x *KeyValuePair) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValuePair.ProtoReflect.Descriptor instead. +func (*KeyValuePair) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{16} +} + +func (x *KeyValuePair) GetKey() []byte { + if x != nil { + return x.Key + } + return nil +} + +func (x *KeyValuePair) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +type LeafKeyAndValueDigests struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyDigest []byte `protobuf:"bytes,1,opt,name=key_digest,json=keyDigest,proto3" json:"key_digest,omitempty"` + ValueDigest []byte `protobuf:"bytes,2,opt,name=value_digest,json=valueDigest,proto3" json:"value_digest,omitempty"` +} + +func (x *LeafKeyAndValueDigests) Reset() { + *x = LeafKeyAndValueDigests{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LeafKeyAndValueDigests) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LeafKeyAndValueDigests) ProtoMessage() {} + +func (x *LeafKeyAndValueDigests) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LeafKeyAndValueDigests.ProtoReflect.Descriptor instead. 
+func (*LeafKeyAndValueDigests) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{17} +} + +func (x *LeafKeyAndValueDigests) GetKeyDigest() []byte { + if x != nil { + return x.KeyDigest + } + return nil +} + +func (x *LeafKeyAndValueDigests) GetValueDigest() []byte { + if x != nil { + return x.ValueDigest + } + return nil +} + +type BoundingLeaf struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Leaf *LeafKeyAndValueDigests `protobuf:"bytes,1,opt,name=leaf,proto3" json:"leaf,omitempty"` + Siblings [][]byte `protobuf:"bytes,2,rep,name=siblings,proto3" json:"siblings,omitempty"` +} + +func (x *BoundingLeaf) Reset() { + *x = BoundingLeaf{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BoundingLeaf) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BoundingLeaf) ProtoMessage() {} + +func (x *BoundingLeaf) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BoundingLeaf.ProtoReflect.Descriptor instead. 
+func (*BoundingLeaf) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{18} +} + +func (x *BoundingLeaf) GetLeaf() *LeafKeyAndValueDigests { + if x != nil { + return x.Leaf + } + return nil +} + +func (x *BoundingLeaf) GetSiblings() [][]byte { + if x != nil { + return x.Siblings + } + return nil +} + +type MessageTreeSyncChunkResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ToSeqNr uint64 `protobuf:"varint,1,opt,name=to_seq_nr,json=toSeqNr,proto3" json:"to_seq_nr,omitempty"` + StartIndex []byte `protobuf:"bytes,2,opt,name=start_index,json=startIndex,proto3" json:"start_index,omitempty"` + RequestEndInclIndex []byte `protobuf:"bytes,3,opt,name=request_end_incl_index,json=requestEndInclIndex,proto3" json:"request_end_incl_index,omitempty"` + GoAway bool `protobuf:"varint,4,opt,name=go_away,json=goAway,proto3" json:"go_away,omitempty"` + EndInclIndex []byte `protobuf:"bytes,5,opt,name=end_incl_index,json=endInclIndex,proto3" json:"end_incl_index,omitempty"` + KeyValues []*KeyValuePair `protobuf:"bytes,6,rep,name=key_values,json=keyValues,proto3" json:"key_values,omitempty"` + BoundingLeaves []*BoundingLeaf `protobuf:"bytes,7,rep,name=bounding_leaves,json=boundingLeaves,proto3" json:"bounding_leaves,omitempty"` +} + +func (x *MessageTreeSyncChunkResponse) Reset() { + *x = MessageTreeSyncChunkResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageTreeSyncChunkResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageTreeSyncChunkResponse) ProtoMessage() {} + +func (x *MessageTreeSyncChunkResponse) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageTreeSyncChunkResponse.ProtoReflect.Descriptor instead. +func (*MessageTreeSyncChunkResponse) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{19} +} + +func (x *MessageTreeSyncChunkResponse) GetToSeqNr() uint64 { + if x != nil { + return x.ToSeqNr + } + return 0 +} + +func (x *MessageTreeSyncChunkResponse) GetStartIndex() []byte { + if x != nil { + return x.StartIndex + } + return nil +} + +func (x *MessageTreeSyncChunkResponse) GetRequestEndInclIndex() []byte { + if x != nil { + return x.RequestEndInclIndex + } + return nil +} + +func (x *MessageTreeSyncChunkResponse) GetGoAway() bool { + if x != nil { + return x.GoAway + } + return false +} + +func (x *MessageTreeSyncChunkResponse) GetEndInclIndex() []byte { + if x != nil { + return x.EndInclIndex + } + return nil +} + +func (x *MessageTreeSyncChunkResponse) GetKeyValues() []*KeyValuePair { + if x != nil { + return x.KeyValues + } + return nil +} + +func (x *MessageTreeSyncChunkResponse) GetBoundingLeaves() []*BoundingLeaf { + if x != nil { + return x.BoundingLeaves + } + return nil +} + type EpochStartProof struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1123,7 +1490,7 @@ type EpochStartProof struct { func (x *EpochStartProof) Reset() { *x = EpochStartProof{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[15] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1136,7 +1503,7 @@ func (x *EpochStartProof) String() string { func (*EpochStartProof) ProtoMessage() {} func (x *EpochStartProof) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[15] + mi := 
&file_offchainreporting3_1_messages_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1149,7 +1516,7 @@ func (x *EpochStartProof) ProtoReflect() protoreflect.Message { // Deprecated: Use EpochStartProof.ProtoReflect.Descriptor instead. func (*EpochStartProof) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{15} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{20} } func (x *EpochStartProof) GetHighestCertified() *CertifiedPrepareOrCommit { @@ -1181,7 +1548,7 @@ type CertifiedPrepareOrCommit struct { func (x *CertifiedPrepareOrCommit) Reset() { *x = CertifiedPrepareOrCommit{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[16] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1194,7 +1561,7 @@ func (x *CertifiedPrepareOrCommit) String() string { func (*CertifiedPrepareOrCommit) ProtoMessage() {} func (x *CertifiedPrepareOrCommit) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[16] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1207,7 +1574,7 @@ func (x *CertifiedPrepareOrCommit) ProtoReflect() protoreflect.Message { // Deprecated: Use CertifiedPrepareOrCommit.ProtoReflect.Descriptor instead. 
func (*CertifiedPrepareOrCommit) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{16} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{21} } func (m *CertifiedPrepareOrCommit) GetPrepareOrCommit() isCertifiedPrepareOrCommit_PrepareOrCommit { @@ -1256,14 +1623,15 @@ type CertifiedPrepare struct { SeqNr uint64 `protobuf:"varint,2,opt,name=seq_nr,json=seqNr,proto3" json:"seq_nr,omitempty"` StateTransitionInputsDigest []byte `protobuf:"bytes,3,opt,name=state_transition_inputs_digest,json=stateTransitionInputsDigest,proto3" json:"state_transition_inputs_digest,omitempty"` StateTransitionOutputs *StateTransitionOutputs `protobuf:"bytes,4,opt,name=state_transition_outputs,json=stateTransitionOutputs,proto3" json:"state_transition_outputs,omitempty"` - ReportsPlusPrecursor []byte `protobuf:"bytes,5,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` - PrepareQuorumCertificate []*AttributedPrepareSignature `protobuf:"bytes,6,rep,name=prepare_quorum_certificate,json=prepareQuorumCertificate,proto3" json:"prepare_quorum_certificate,omitempty"` + StateRootDigest []byte `protobuf:"bytes,5,opt,name=state_root_digest,json=stateRootDigest,proto3" json:"state_root_digest,omitempty"` + ReportsPlusPrecursor []byte `protobuf:"bytes,6,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` + PrepareQuorumCertificate []*AttributedPrepareSignature `protobuf:"bytes,7,rep,name=prepare_quorum_certificate,json=prepareQuorumCertificate,proto3" json:"prepare_quorum_certificate,omitempty"` } func (x *CertifiedPrepare) Reset() { *x = CertifiedPrepare{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[17] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1276,7 +1644,7 @@ func (x 
*CertifiedPrepare) String() string { func (*CertifiedPrepare) ProtoMessage() {} func (x *CertifiedPrepare) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[17] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1289,7 +1657,7 @@ func (x *CertifiedPrepare) ProtoReflect() protoreflect.Message { // Deprecated: Use CertifiedPrepare.ProtoReflect.Descriptor instead. func (*CertifiedPrepare) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{17} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{22} } func (x *CertifiedPrepare) GetEpoch() uint64 { @@ -1320,6 +1688,13 @@ func (x *CertifiedPrepare) GetStateTransitionOutputs() *StateTransitionOutputs { return nil } +func (x *CertifiedPrepare) GetStateRootDigest() []byte { + if x != nil { + return x.StateRootDigest + } + return nil +} + func (x *CertifiedPrepare) GetReportsPlusPrecursor() []byte { if x != nil { return x.ReportsPlusPrecursor @@ -1343,14 +1718,15 @@ type CertifiedCommit struct { SeqNr uint64 `protobuf:"varint,2,opt,name=seq_nr,json=seqNr,proto3" json:"seq_nr,omitempty"` StateTransitionInputsDigest []byte `protobuf:"bytes,3,opt,name=state_transition_inputs_digest,json=stateTransitionInputsDigest,proto3" json:"state_transition_inputs_digest,omitempty"` StateTransitionOutputs *StateTransitionOutputs `protobuf:"bytes,4,opt,name=state_transition_outputs,json=stateTransitionOutputs,proto3" json:"state_transition_outputs,omitempty"` - ReportsPlusPrecursor []byte `protobuf:"bytes,5,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` - CommitQuorumCertificate []*AttributedCommitSignature `protobuf:"bytes,6,rep,name=commit_quorum_certificate,json=commitQuorumCertificate,proto3" 
json:"commit_quorum_certificate,omitempty"` + StateRootDigest []byte `protobuf:"bytes,5,opt,name=state_root_digest,json=stateRootDigest,proto3" json:"state_root_digest,omitempty"` + ReportsPlusPrecursor []byte `protobuf:"bytes,6,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` + CommitQuorumCertificate []*AttributedCommitSignature `protobuf:"bytes,7,rep,name=commit_quorum_certificate,json=commitQuorumCertificate,proto3" json:"commit_quorum_certificate,omitempty"` } func (x *CertifiedCommit) Reset() { *x = CertifiedCommit{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[18] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1363,7 +1739,7 @@ func (x *CertifiedCommit) String() string { func (*CertifiedCommit) ProtoMessage() {} func (x *CertifiedCommit) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[18] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1376,7 +1752,7 @@ func (x *CertifiedCommit) ProtoReflect() protoreflect.Message { // Deprecated: Use CertifiedCommit.ProtoReflect.Descriptor instead. 
func (*CertifiedCommit) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{18} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{23} } func (x *CertifiedCommit) GetEpoch() uint64 { @@ -1407,6 +1783,13 @@ func (x *CertifiedCommit) GetStateTransitionOutputs() *StateTransitionOutputs { return nil } +func (x *CertifiedCommit) GetStateRootDigest() []byte { + if x != nil { + return x.StateRootDigest + } + return nil +} + func (x *CertifiedCommit) GetReportsPlusPrecursor() []byte { if x != nil { return x.ReportsPlusPrecursor @@ -1430,14 +1813,15 @@ type CertifiedCommittedReports struct { SeqNr uint64 `protobuf:"varint,2,opt,name=seq_nr,json=seqNr,proto3" json:"seq_nr,omitempty"` StateTransitionInputsDigest []byte `protobuf:"bytes,3,opt,name=state_transition_inputs_digest,json=stateTransitionInputsDigest,proto3" json:"state_transition_inputs_digest,omitempty"` StateTransitionOutputDigest []byte `protobuf:"bytes,4,opt,name=state_transition_output_digest,json=stateTransitionOutputDigest,proto3" json:"state_transition_output_digest,omitempty"` - ReportsPlusPrecursor []byte `protobuf:"bytes,5,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` - CommitQuorumCertificate []*AttributedCommitSignature `protobuf:"bytes,6,rep,name=commit_quorum_certificate,json=commitQuorumCertificate,proto3" json:"commit_quorum_certificate,omitempty"` + StateRootDigest []byte `protobuf:"bytes,5,opt,name=state_root_digest,json=stateRootDigest,proto3" json:"state_root_digest,omitempty"` + ReportsPlusPrecursor []byte `protobuf:"bytes,6,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` + CommitQuorumCertificate []*AttributedCommitSignature `protobuf:"bytes,7,rep,name=commit_quorum_certificate,json=commitQuorumCertificate,proto3" json:"commit_quorum_certificate,omitempty"` } func (x *CertifiedCommittedReports) Reset() { *x 
= CertifiedCommittedReports{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[19] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1450,7 +1834,7 @@ func (x *CertifiedCommittedReports) String() string { func (*CertifiedCommittedReports) ProtoMessage() {} func (x *CertifiedCommittedReports) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[19] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1463,7 +1847,7 @@ func (x *CertifiedCommittedReports) ProtoReflect() protoreflect.Message { // Deprecated: Use CertifiedCommittedReports.ProtoReflect.Descriptor instead. func (*CertifiedCommittedReports) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{19} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{24} } func (x *CertifiedCommittedReports) GetCommitEpoch() uint64 { @@ -1494,6 +1878,13 @@ func (x *CertifiedCommittedReports) GetStateTransitionOutputDigest() []byte { return nil } +func (x *CertifiedCommittedReports) GetStateRootDigest() []byte { + if x != nil { + return x.StateRootDigest + } + return nil +} + func (x *CertifiedCommittedReports) GetReportsPlusPrecursor() []byte { if x != nil { return x.ReportsPlusPrecursor @@ -1521,7 +1912,7 @@ type HighestCertifiedTimestamp struct { func (x *HighestCertifiedTimestamp) Reset() { *x = HighestCertifiedTimestamp{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[20] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1534,7 +1925,7 @@ func (x *HighestCertifiedTimestamp) String() string { func 
(*HighestCertifiedTimestamp) ProtoMessage() {} func (x *HighestCertifiedTimestamp) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[20] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1547,7 +1938,7 @@ func (x *HighestCertifiedTimestamp) ProtoReflect() protoreflect.Message { // Deprecated: Use HighestCertifiedTimestamp.ProtoReflect.Descriptor instead. func (*HighestCertifiedTimestamp) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{20} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{25} } func (x *HighestCertifiedTimestamp) GetSeqNr() uint64 { @@ -1583,7 +1974,7 @@ type AttributedSignedHighestCertifiedTimestamp struct { func (x *AttributedSignedHighestCertifiedTimestamp) Reset() { *x = AttributedSignedHighestCertifiedTimestamp{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[21] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1596,7 +1987,7 @@ func (x *AttributedSignedHighestCertifiedTimestamp) String() string { func (*AttributedSignedHighestCertifiedTimestamp) ProtoMessage() {} func (x *AttributedSignedHighestCertifiedTimestamp) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[21] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1609,7 +2000,7 @@ func (x *AttributedSignedHighestCertifiedTimestamp) ProtoReflect() protoreflect. // Deprecated: Use AttributedSignedHighestCertifiedTimestamp.ProtoReflect.Descriptor instead. 
func (*AttributedSignedHighestCertifiedTimestamp) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{21} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{26} } func (x *AttributedSignedHighestCertifiedTimestamp) GetSignedHighestCertifiedTimestamp() *SignedHighestCertifiedTimestamp { @@ -1638,7 +2029,7 @@ type SignedHighestCertifiedTimestamp struct { func (x *SignedHighestCertifiedTimestamp) Reset() { *x = SignedHighestCertifiedTimestamp{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[22] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1651,7 +2042,7 @@ func (x *SignedHighestCertifiedTimestamp) String() string { func (*SignedHighestCertifiedTimestamp) ProtoMessage() {} func (x *SignedHighestCertifiedTimestamp) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[22] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1664,7 +2055,7 @@ func (x *SignedHighestCertifiedTimestamp) ProtoReflect() protoreflect.Message { // Deprecated: Use SignedHighestCertifiedTimestamp.ProtoReflect.Descriptor instead. 
func (*SignedHighestCertifiedTimestamp) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{22} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{27} } func (x *SignedHighestCertifiedTimestamp) GetHighestCertifiedTimestamp() *HighestCertifiedTimestamp { @@ -1693,7 +2084,7 @@ type AttributedObservation struct { func (x *AttributedObservation) Reset() { *x = AttributedObservation{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[23] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1706,7 +2097,7 @@ func (x *AttributedObservation) String() string { func (*AttributedObservation) ProtoMessage() {} func (x *AttributedObservation) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[23] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1719,7 +2110,7 @@ func (x *AttributedObservation) ProtoReflect() protoreflect.Message { // Deprecated: Use AttributedObservation.ProtoReflect.Descriptor instead. 
func (*AttributedObservation) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{23} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{28} } func (x *AttributedObservation) GetObservation() []byte { @@ -1748,7 +2139,7 @@ type AttributedSignedObservation struct { func (x *AttributedSignedObservation) Reset() { *x = AttributedSignedObservation{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[24] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1761,7 +2152,7 @@ func (x *AttributedSignedObservation) String() string { func (*AttributedSignedObservation) ProtoMessage() {} func (x *AttributedSignedObservation) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[24] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1774,7 +2165,7 @@ func (x *AttributedSignedObservation) ProtoReflect() protoreflect.Message { // Deprecated: Use AttributedSignedObservation.ProtoReflect.Descriptor instead. 
func (*AttributedSignedObservation) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{24} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{29} } func (x *AttributedSignedObservation) GetSignedObservation() *SignedObservation { @@ -1803,7 +2194,7 @@ type SignedObservation struct { func (x *SignedObservation) Reset() { *x = SignedObservation{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[25] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1816,7 +2207,7 @@ func (x *SignedObservation) String() string { func (*SignedObservation) ProtoMessage() {} func (x *SignedObservation) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[25] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1829,7 +2220,7 @@ func (x *SignedObservation) ProtoReflect() protoreflect.Message { // Deprecated: Use SignedObservation.ProtoReflect.Descriptor instead. 
func (*SignedObservation) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{25} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{30} } func (x *SignedObservation) GetObservation() []byte { @@ -1858,7 +2249,7 @@ type AttributedPrepareSignature struct { func (x *AttributedPrepareSignature) Reset() { *x = AttributedPrepareSignature{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[26] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1871,7 +2262,7 @@ func (x *AttributedPrepareSignature) String() string { func (*AttributedPrepareSignature) ProtoMessage() {} func (x *AttributedPrepareSignature) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[26] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1884,7 +2275,7 @@ func (x *AttributedPrepareSignature) ProtoReflect() protoreflect.Message { // Deprecated: Use AttributedPrepareSignature.ProtoReflect.Descriptor instead. 
func (*AttributedPrepareSignature) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{26} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{31} } func (x *AttributedPrepareSignature) GetSignature() []byte { @@ -1913,7 +2304,7 @@ type AttributedCommitSignature struct { func (x *AttributedCommitSignature) Reset() { *x = AttributedCommitSignature{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[27] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1926,7 +2317,7 @@ func (x *AttributedCommitSignature) String() string { func (*AttributedCommitSignature) ProtoMessage() {} func (x *AttributedCommitSignature) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[27] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1939,7 +2330,7 @@ func (x *AttributedCommitSignature) ProtoReflect() protoreflect.Message { // Deprecated: Use AttributedCommitSignature.ProtoReflect.Descriptor instead. 
func (*AttributedCommitSignature) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{27} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{32} } func (x *AttributedCommitSignature) GetSignature() []byte { @@ -1968,7 +2359,7 @@ type AttestedStateTransitionBlock struct { func (x *AttestedStateTransitionBlock) Reset() { *x = AttestedStateTransitionBlock{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[28] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1981,7 +2372,7 @@ func (x *AttestedStateTransitionBlock) String() string { func (*AttestedStateTransitionBlock) ProtoMessage() {} func (x *AttestedStateTransitionBlock) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[28] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1994,7 +2385,7 @@ func (x *AttestedStateTransitionBlock) ProtoReflect() protoreflect.Message { // Deprecated: Use AttestedStateTransitionBlock.ProtoReflect.Descriptor instead. 
func (*AttestedStateTransitionBlock) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{28} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{33} } func (x *AttestedStateTransitionBlock) GetStateTransitionBlock() *StateTransitionBlock { @@ -2020,13 +2411,14 @@ type StateTransitionBlock struct { SeqNr uint64 `protobuf:"varint,2,opt,name=seq_nr,json=seqNr,proto3" json:"seq_nr,omitempty"` StateTransitionInputsDigest []byte `protobuf:"bytes,3,opt,name=state_transition_inputs_digest,json=stateTransitionInputsDigest,proto3" json:"state_transition_inputs_digest,omitempty"` StateTransitionOutputs *StateTransitionOutputs `protobuf:"bytes,4,opt,name=state_transition_outputs,json=stateTransitionOutputs,proto3" json:"state_transition_outputs,omitempty"` - ReportsPlusPrecursor []byte `protobuf:"bytes,5,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` + StateRootDigest []byte `protobuf:"bytes,5,opt,name=state_root_digest,json=stateRootDigest,proto3" json:"state_root_digest,omitempty"` + ReportsPlusPrecursor []byte `protobuf:"bytes,6,opt,name=reports_plus_precursor,json=reportsPlusPrecursor,proto3" json:"reports_plus_precursor,omitempty"` } func (x *StateTransitionBlock) Reset() { *x = StateTransitionBlock{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[29] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2039,7 +2431,7 @@ func (x *StateTransitionBlock) String() string { func (*StateTransitionBlock) ProtoMessage() {} func (x *StateTransitionBlock) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[29] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -2052,7 +2444,7 @@ func (x *StateTransitionBlock) ProtoReflect() protoreflect.Message { // Deprecated: Use StateTransitionBlock.ProtoReflect.Descriptor instead. func (*StateTransitionBlock) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{29} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{34} } func (x *StateTransitionBlock) GetEpoch() uint64 { @@ -2083,6 +2475,13 @@ func (x *StateTransitionBlock) GetStateTransitionOutputs() *StateTransitionOutpu return nil } +func (x *StateTransitionBlock) GetStateRootDigest() []byte { + if x != nil { + return x.StateRootDigest + } + return nil +} + func (x *StateTransitionBlock) GetReportsPlusPrecursor() []byte { if x != nil { return x.ReportsPlusPrecursor @@ -2101,7 +2500,7 @@ type StateTransitionOutputs struct { func (x *StateTransitionOutputs) Reset() { *x = StateTransitionOutputs{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[30] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2114,7 +2513,7 @@ func (x *StateTransitionOutputs) String() string { func (*StateTransitionOutputs) ProtoMessage() {} func (x *StateTransitionOutputs) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[30] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2127,7 +2526,7 @@ func (x *StateTransitionOutputs) ProtoReflect() protoreflect.Message { // Deprecated: Use StateTransitionOutputs.ProtoReflect.Descriptor instead. 
func (*StateTransitionOutputs) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{30} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{35} } func (x *StateTransitionOutputs) GetWriteSet() []*KeyValueModification { @@ -2150,7 +2549,7 @@ type KeyValueModification struct { func (x *KeyValueModification) Reset() { *x = KeyValueModification{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[31] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2163,7 +2562,7 @@ func (x *KeyValueModification) String() string { func (*KeyValueModification) ProtoMessage() {} func (x *KeyValueModification) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[31] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2176,7 +2575,7 @@ func (x *KeyValueModification) ProtoReflect() protoreflect.Message { // Deprecated: Use KeyValueModification.ProtoReflect.Descriptor instead. 
func (*KeyValueModification) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{31} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{36} } func (x *KeyValueModification) GetKey() []byte { @@ -2215,7 +2614,7 @@ type StateTransitionInputs struct { func (x *StateTransitionInputs) Reset() { *x = StateTransitionInputs{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[32] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2228,7 +2627,7 @@ func (x *StateTransitionInputs) String() string { func (*StateTransitionInputs) ProtoMessage() {} func (x *StateTransitionInputs) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[32] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2241,7 +2640,7 @@ func (x *StateTransitionInputs) ProtoReflect() protoreflect.Message { // Deprecated: Use StateTransitionInputs.ProtoReflect.Descriptor instead. 
func (*StateTransitionInputs) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{32} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{37} } func (x *StateTransitionInputs) GetSeqNr() uint64 { @@ -2287,13 +2686,12 @@ type MessageBlobOffer struct { ChunkDigests [][]byte `protobuf:"bytes,1,rep,name=chunk_digests,json=chunkDigests,proto3" json:"chunk_digests,omitempty"` PayloadLength uint64 `protobuf:"varint,2,opt,name=payload_length,json=payloadLength,proto3" json:"payload_length,omitempty"` ExpirySeqNr uint64 `protobuf:"varint,3,opt,name=expiry_seq_nr,json=expirySeqNr,proto3" json:"expiry_seq_nr,omitempty"` - Submitter uint32 `protobuf:"varint,4,opt,name=submitter,proto3" json:"submitter,omitempty"` } func (x *MessageBlobOffer) Reset() { *x = MessageBlobOffer{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[33] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2306,7 +2704,7 @@ func (x *MessageBlobOffer) String() string { func (*MessageBlobOffer) ProtoMessage() {} func (x *MessageBlobOffer) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[33] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2319,7 +2717,7 @@ func (x *MessageBlobOffer) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageBlobOffer.ProtoReflect.Descriptor instead. 
func (*MessageBlobOffer) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{33} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{38} } func (x *MessageBlobOffer) GetChunkDigests() [][]byte { @@ -2343,13 +2741,6 @@ func (x *MessageBlobOffer) GetExpirySeqNr() uint64 { return 0 } -func (x *MessageBlobOffer) GetSubmitter() uint32 { - if x != nil { - return x.Submitter - } - return 0 -} - type MessageBlobChunkRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -2362,7 +2753,7 @@ type MessageBlobChunkRequest struct { func (x *MessageBlobChunkRequest) Reset() { *x = MessageBlobChunkRequest{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[34] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2375,7 +2766,7 @@ func (x *MessageBlobChunkRequest) String() string { func (*MessageBlobChunkRequest) ProtoMessage() {} func (x *MessageBlobChunkRequest) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[34] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2388,7 +2779,7 @@ func (x *MessageBlobChunkRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageBlobChunkRequest.ProtoReflect.Descriptor instead. 
func (*MessageBlobChunkRequest) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{34} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{39} } func (x *MessageBlobChunkRequest) GetBlobDigest() []byte { @@ -2412,13 +2803,14 @@ type MessageBlobChunkResponse struct { BlobDigest []byte `protobuf:"bytes,1,opt,name=blob_digest,json=blobDigest,proto3" json:"blob_digest,omitempty"` ChunkIndex uint64 `protobuf:"varint,2,opt,name=chunk_index,json=chunkIndex,proto3" json:"chunk_index,omitempty"` - Chunk []byte `protobuf:"bytes,3,opt,name=chunk,proto3" json:"chunk,omitempty"` + GoAway bool `protobuf:"varint,3,opt,name=go_away,json=goAway,proto3" json:"go_away,omitempty"` + Chunk []byte `protobuf:"bytes,4,opt,name=chunk,proto3" json:"chunk,omitempty"` } func (x *MessageBlobChunkResponse) Reset() { *x = MessageBlobChunkResponse{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[35] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2431,7 +2823,7 @@ func (x *MessageBlobChunkResponse) String() string { func (*MessageBlobChunkResponse) ProtoMessage() {} func (x *MessageBlobChunkResponse) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[35] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2444,7 +2836,7 @@ func (x *MessageBlobChunkResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use MessageBlobChunkResponse.ProtoReflect.Descriptor instead. 
func (*MessageBlobChunkResponse) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{35} + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{40} } func (x *MessageBlobChunkResponse) GetBlobDigest() []byte { @@ -2461,6 +2853,13 @@ func (x *MessageBlobChunkResponse) GetChunkIndex() uint64 { return 0 } +func (x *MessageBlobChunkResponse) GetGoAway() bool { + if x != nil { + return x.GoAway + } + return false +} + func (x *MessageBlobChunkResponse) GetChunk() []byte { if x != nil { return x.Chunk @@ -2468,32 +2867,33 @@ func (x *MessageBlobChunkResponse) GetChunk() []byte { return nil } -type MessageBlobAvailable struct { +type MessageBlobOfferResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - BlobDigest []byte `protobuf:"bytes,1,opt,name=blob_digest,json=blobDigest,proto3" json:"blob_digest,omitempty"` - Signature []byte `protobuf:"bytes,2,opt,name=signature,proto3" json:"signature,omitempty"` + BlobDigest []byte `protobuf:"bytes,1,opt,name=blob_digest,json=blobDigest,proto3" json:"blob_digest,omitempty"` + RejectOffer bool `protobuf:"varint,2,opt,name=reject_offer,json=rejectOffer,proto3" json:"reject_offer,omitempty"` + Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` } -func (x *MessageBlobAvailable) Reset() { - *x = MessageBlobAvailable{} +func (x *MessageBlobOfferResponse) Reset() { + *x = MessageBlobOfferResponse{} if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[36] + mi := &file_offchainreporting3_1_messages_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *MessageBlobAvailable) String() string { +func (x *MessageBlobOfferResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*MessageBlobAvailable) ProtoMessage() {} +func (*MessageBlobOfferResponse) ProtoMessage() {} 
-func (x *MessageBlobAvailable) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[36] +func (x *MessageBlobOfferResponse) ProtoReflect() protoreflect.Message { + mi := &file_offchainreporting3_1_messages_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2504,87 +2904,39 @@ func (x *MessageBlobAvailable) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use MessageBlobAvailable.ProtoReflect.Descriptor instead. -func (*MessageBlobAvailable) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{36} +// Deprecated: Use MessageBlobOfferResponse.ProtoReflect.Descriptor instead. +func (*MessageBlobOfferResponse) Descriptor() ([]byte, []int) { + return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{41} } -func (x *MessageBlobAvailable) GetBlobDigest() []byte { +func (x *MessageBlobOfferResponse) GetBlobDigest() []byte { if x != nil { return x.BlobDigest } return nil } -func (x *MessageBlobAvailable) GetSignature() []byte { +func (x *MessageBlobOfferResponse) GetRejectOffer() bool { if x != nil { - return x.Signature - } - return nil -} - -type AttributedBlobAvailabilitySignature struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` - Signer uint32 `protobuf:"varint,2,opt,name=signer,proto3" json:"signer,omitempty"` -} - -func (x *AttributedBlobAvailabilitySignature) Reset() { - *x = AttributedBlobAvailabilitySignature{} - if protoimpl.UnsafeEnabled { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AttributedBlobAvailabilitySignature) String() string { - return 
protoimpl.X.MessageStringOf(x) -} - -func (*AttributedBlobAvailabilitySignature) ProtoMessage() {} - -func (x *AttributedBlobAvailabilitySignature) ProtoReflect() protoreflect.Message { - mi := &file_offchainreporting3_1_messages_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms + return x.RejectOffer } - return mi.MessageOf(x) -} - -// Deprecated: Use AttributedBlobAvailabilitySignature.ProtoReflect.Descriptor instead. -func (*AttributedBlobAvailabilitySignature) Descriptor() ([]byte, []int) { - return file_offchainreporting3_1_messages_proto_rawDescGZIP(), []int{37} + return false } -func (x *AttributedBlobAvailabilitySignature) GetSignature() []byte { +func (x *MessageBlobOfferResponse) GetSignature() []byte { if x != nil { return x.Signature } return nil } -func (x *AttributedBlobAvailabilitySignature) GetSigner() uint32 { - if x != nil { - return x.Signer - } - return 0 -} - var File_offchainreporting3_1_messages_proto protoreflect.FileDescriptor var file_offchainreporting3_1_messages_proto_rawDesc = []byte{ 0x0a, 0x23, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x22, 0xa2, 0x0e, 0x0a, 0x0e, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x22, 0xc1, 0x10, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x60, 0x0a, 0x16, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x77, 0x69, 0x73, 0x68, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, @@ -2659,411 +3011,492 @@ var 
file_offchainreporting3_1_messages_proto_rawDesc = []byte{ 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x12, + 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6f, 0x0a, 0x1b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x73, 0x79, - 0x6e, 0x63, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, - 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x53, 0x79, 0x6e, 0x63, 0x12, 0x6c, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x56, 0x0a, 0x12, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, - 0x6f, 0x62, 0x5f, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 
0x74, 0x69, - 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, - 0x62, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x12, 0x6c, 0x0a, 0x1a, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, - 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, - 0x62, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, - 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, 0x68, 0x75, 0x6e, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6f, 0x0a, 0x1b, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, - 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, - 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, 0x68, 0x75, 0x6e, - 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x16, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, - 0x62, 0x6c, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x66, 0x66, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, - 0x2e, 0x4d, 0x65, 0x73, 
0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x76, 0x61, 0x69, - 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x00, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x05, 0x0a, - 0x03, 0x6d, 0x73, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x11, - 0x22, 0x2b, 0x0a, 0x13, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x65, 0x77, 0x45, 0x70, - 0x6f, 0x63, 0x68, 0x57, 0x69, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x92, 0x02, - 0x0a, 0x18, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, - 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, - 0x12, 0x5b, 0x0a, 0x11, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6f, 0x66, - 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, - 0x5f, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x4f, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x10, 0x68, 0x69, 0x67, - 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x82, 0x01, - 0x0a, 0x22, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, - 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, 0x66, 0x66, + 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 
0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, + 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, + 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x48, 0x00, 0x52, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x53, 0x79, 0x6e, 0x63, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x79, 0x0a, 0x1f, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, + 0x5f, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1f, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x54, 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x68, 0x75, 0x6e, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x1b, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x54, 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x7c, 0x0a, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x68, 0x75, 0x6e, + 
0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x32, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x1c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, + 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x12, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x26, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x1b, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x6f, 0x66, 0x66, + 0x65, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2e, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x00, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, + 0x4f, 0x66, 0x66, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, + 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x63, 0x68, + 0x75, 0x6e, 0x6b, 0x5f, 0x72, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x6c, 0x6f, 0x62, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x48, 0x00, 0x52, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x6f, 0x0a, 0x1b, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x63, 0x68, 0x75, 0x6e, + 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x6f, 0x62, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x48, 0x00, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x05, 0x0a, 0x03, + 0x6d, 0x73, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x11, 0x22, + 0x2b, 0x0a, 0x13, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x65, 0x77, 0x45, 0x70, 0x6f, + 0x63, 0x68, 0x57, 0x69, 0x73, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x92, 0x02, 0x0a, + 0x18, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, + 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, + 0x5b, 0x0a, 0x11, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 
0x65, 0x72, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, - 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, + 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x4f, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x10, 0x68, 0x69, 0x67, 0x68, + 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, 0x82, 0x01, 0x0a, + 0x22, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, 0x66, 0x66, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, + 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x1f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x1f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x22, 0x7c, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x70, 0x6f, - 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x51, 0x0a, - 0x11, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, - 0x6f, 0x66, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x45, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, - 0x0f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, - 0x22, 0x56, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, - 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, - 0x4e, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x12, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x70, 0x22, 0x7c, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x70, 0x6f, 0x63, + 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x51, 0x0a, 0x11, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x6f, + 0x66, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x45, + 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x52, 0x0f, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, + 0x56, 0x0a, 0x11, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x6f, 0x75, 0x6e, 0x64, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 
0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, + 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x12, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, + 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x56, 0x0a, 0x12, 0x73, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x53, + 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x22, 0xb7, 0x01, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, + 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, + 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, + 0x65, 0x71, 0x4e, 0x72, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 
0x6f, + 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, + 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x1c, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, + 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x5b, 0x0a, + 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x56, 0x0a, 0x12, - 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb7, 0x01, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x50, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, - 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x77, 0x0a, 0x1e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, - 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x1c, 0x0a, 0x09, + 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x5a, 0x0a, 0x0d, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x5d, 0x0a, 0x17, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x36, 0x0a, 0x1d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x22, 0x89, 0x01, + 0x0a, 0x16, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x6f, 0x0a, 0x1b, 
0x63, 0x65, 0x72, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x1c, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, - 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x5b, - 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x5a, 0x0a, 0x0d, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, - 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x5d, 0x0a, 0x17, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 
0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x70, - 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x36, 0x0a, 0x1d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x22, 0x89, - 0x01, 0x0a, 0x16, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x6f, 0x0a, 0x1b, 0x63, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, - 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, - 0x19, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x68, 0x0a, 0x17, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x18, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 
0x15, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, - 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x14, - 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, - 0x6f, 0x6e, 0x63, 0x65, 0x22, 0xa5, 0x01, 0x0a, 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x7b, 0x0a, 0x20, 0x61, 0x74, 0x74, - 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x1d, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x50, 0x0a, 0x17, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x6f, 0x77, 0x65, 0x73, - 0x74, 0x5f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x71, 0x5f, - 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6c, 0x6f, 0x77, 0x65, 0x73, 0x74, - 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x22, 0xe7, - 0x01, 0x0a, 0x0f, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x72, 0x6f, - 0x6f, 0x66, 0x12, 0x5b, 0x0a, 0x11, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 
0x63, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x52, 0x19, + 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, + 0x65, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x62, 0x0a, 0x17, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x65, + 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x25, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x78, + 0x63, 0x6c, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0c, 0x65, 0x6e, 0x64, 0x45, 0x78, 0x63, 0x6c, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x22, 0xfe, 0x01, + 0x0a, 0x18, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x53, 0x79, + 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x14, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x5f, + 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x34, 0x0a, 0x17, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x78, 0x63, 0x6c, 0x5f, + 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x64, 0x45, 0x78, 0x63, 0x6c, 0x53, 0x65, 0x71, 0x4e, + 0x72, 0x12, 0x7b, 0x0a, 0x20, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x5f, 
0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6f, 0x66, + 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, + 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x1d, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x22, 0x89, + 0x01, 0x0a, 0x17, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x79, 0x6e, 0x63, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x6f, + 0x77, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, + 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6c, 0x6f, 0x77, + 0x65, 0x73, 0x74, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x65, 0x71, 0x4e, + 0x72, 0x12, 0x37, 0x0a, 0x18, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x15, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x74, 0x65, 0x64, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x22, 0x80, 0x01, 0x0a, 0x1b, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x65, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x68, + 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x09, 0x74, 0x6f, + 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, + 0x6f, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, + 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0c, 0x52, 0x0a, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x6e, 0x64, 0x5f, 0x69, + 0x6e, 0x63, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0c, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x63, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x36, 0x0a, + 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5a, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, + 0x41, 0x6e, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x73, 0x12, + 0x1d, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x22, 0x6c, 0x0a, 0x0c, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x65, 0x61, + 0x66, 0x12, 0x40, 0x0a, 0x04, 0x6c, 0x65, 0x61, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4c, 0x65, 0x61, 0x66, 0x4b, 0x65, 0x79, 0x41, 0x6e, + 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x73, 0x52, 0x04, 0x6c, + 0x65, 0x61, 0x66, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x73, 0x22, + 0xdf, 0x02, 0x0a, 0x1c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x72, 0x65, 0x65, 0x53, + 
0x79, 0x6e, 0x63, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1a, 0x0a, 0x09, 0x74, 0x6f, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x6f, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x1f, 0x0a, 0x0b, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x33, 0x0a, + 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x69, 0x6e, 0x63, + 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x45, 0x6e, 0x64, 0x49, 0x6e, 0x63, 0x6c, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x12, 0x17, 0x0a, 0x07, 0x67, 0x6f, 0x5f, 0x61, 0x77, 0x61, 0x79, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x67, 0x6f, 0x41, 0x77, 0x61, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x65, + 0x6e, 0x64, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x63, 0x6c, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x12, 0x41, 0x0a, 0x0a, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x52, 0x09, 0x6b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x5f, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, - 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x4f, 0x72, 
0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x52, 0x10, 0x68, - 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x12, - 0x77, 0x0a, 0x17, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x15, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xb4, 0x01, 0x0a, 0x18, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x4f, 0x72, 0x43, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x48, 0x00, - 0x52, 0x07, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x66, 0x66, 0x63, + 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x65, 0x61, + 0x66, 0x52, 0x0e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x4c, 0x65, 0x61, 0x76, 0x65, + 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x0f, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x12, 0x5b, 0x0a, 0x11, 0x68, 0x69, 0x67, 0x68, 
0x65, 0x73, 0x74, + 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2e, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x4f, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x52, 0x10, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x12, 0x77, 0x0a, 0x17, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x6f, 0x66, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, + 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x15, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x6f, 0x66, 0x22, 0xb4, 0x01, 0x0a, 0x18, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x4f, 0x72, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x42, 0x0a, 0x07, 0x70, 0x72, 0x65, 0x70, + 0x61, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, - 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x70, 0x72, - 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x22, - 0x92, 0x03, 0x0a, 
0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, - 0x70, 0x61, 0x72, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, - 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, - 0x72, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x5f, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x34, - 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, 0x73, 0x5f, 0x70, - 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, 0x65, 0x63, 0x75, - 0x72, 0x73, 0x6f, 0x72, 0x12, 0x6e, 0x0a, 0x1a, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, - 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x30, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, - 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x18, 0x70, 0x72, 0x65, 0x70, - 0x61, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x22, 0x8e, 0x03, 0x0a, 0x0f, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, - 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, - 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, - 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, + 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, - 0x33, 0x5f, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 
- 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x16, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, - 0x75, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, - 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, + 0x33, 0x5f, 0x31, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x48, 0x00, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x42, 0x13, 0x0a, + 0x11, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x6f, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x22, 0xbe, 0x03, 0x0a, 0x10, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, + 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, + 0x65, 0x71, 0x4e, 0x72, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 
0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, - 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x17, 0x63, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x82, 0x03, 0x0a, 0x19, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x70, - 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x43, 0x0a, + 0x5f, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, 0x73, 0x5f, 0x70, 0x72, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, 0x65, 
0x63, 0x75, 0x72, + 0x73, 0x6f, 0x72, 0x12, 0x6e, 0x0a, 0x1a, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x5f, 0x71, + 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x18, 0x70, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x22, 0xba, 0x03, 0x0a, 0x0f, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, + 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, + 0x65, 0x71, 0x4e, 0x72, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x66, + 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, + 0x5f, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x4f, 
0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x5f, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, 0x73, 0x5f, 0x70, 0x72, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, 0x65, 0x63, 0x75, 0x72, + 0x73, 0x6f, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x71, 0x75, + 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, + 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x17, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51, + 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x22, 0xae, 0x03, 0x0a, 0x19, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x43, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x45, 0x70, 0x6f, 0x63, + 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 
0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x44, 0x69, 0x67, 0x65, - 0x73, 0x74, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x69, - 0x67, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, - 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, - 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x12, 0x6b, 0x0a, - 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x71, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 
- 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x52, 0x17, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, - 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x19, 0x48, - 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, - 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, - 0x36, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x6c, 0x73, - 0x65, 0x5f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x15, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x45, 0x6c, 0x73, 0x65, 0x50, - 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x22, 0xc8, 0x01, - 0x0a, 0x29, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, - 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x82, 0x01, 0x0a, 0x22, - 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, + 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 
0x6f, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x34, + 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, 0x73, 0x5f, 0x70, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x6f, 0x72, 0x12, 0x6b, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x71, + 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x17, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x22, 0x80, 0x01, 0x0a, 0x19, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x36, 0x0a, 0x17, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x74, 0x65, 0x64, 0x5f, 0x65, 0x6c, 0x73, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, + 0x65, 0x64, 0x45, 0x6c, 0x73, 0x65, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, + 0x70, 0x6f, 0x63, 0x68, 0x22, 0xc8, 0x01, 0x0a, 0x29, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 
0x68, 0x65, 0x73, 0x74, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x12, 0x82, 0x01, 0x0a, 0x22, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x68, 0x69, + 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x35, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, + 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x1f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, + 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, + 0xb0, 0x01, 0x0a, 0x1f, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, + 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x6f, 0x0a, 0x1b, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, + 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, - 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x1f, 0x73, 0x69, 
0x67, 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, - 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, 0xb0, 0x01, 0x0a, 0x1f, 0x53, 0x69, 0x67, - 0x6e, 0x65, 0x64, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x6f, 0x0a, 0x1b, - 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, - 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x19, 0x68, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x55, 0x0a, 0x15, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, - 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x22, 0x91, 0x01, 0x0a, 0x1b, 0x41, 0x74, 
0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, - 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, - 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, - 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, - 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, - 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6f, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0x53, 0x0a, 0x11, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, + 0x48, 0x69, 0x67, 0x68, 0x65, 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x19, 0x68, 0x69, 0x67, 0x68, 0x65, + 0x73, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0x55, 0x0a, 0x15, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x52, 0x0a, 0x1a, 0x41, - 
0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, - 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, - 0x51, 0x0a, 0x19, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, - 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, - 0x65, 0x72, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x60, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x64, 0x0a, 0x15, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 
0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x14, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa6, 0x02, 0x0a, 0x14, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, - 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, - 0x72, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x5f, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, 0x18, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, + 0x52, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, + 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0x91, 0x01, 0x0a, 0x1b, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 
0x12, 0x73, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x53, 0x69, 0x67, + 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, + 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x08, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x22, 0x53, 0x0a, + 0x11, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, + 0x72, 0x65, 0x22, 0x52, 0x0a, 0x1a, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, + 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, 0x51, 0x0a, 0x19, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, + 0x75, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x18, 0x01, 
0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x22, 0xe6, 0x01, 0x0a, 0x1c, 0x41, 0x74, + 0x74, 0x65, 0x73, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x60, 0x0a, 0x16, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, + 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x66, 0x66, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, + 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x64, 0x0a, 0x15, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x66, + 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, + 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x52, 0x14, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, + 0x65, 0x73, 0x22, 0xd2, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x70, 0x6f, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, + 0x68, 0x12, 0x15, 0x0a, 0x06, 0x73, 0x65, 0x71, 
0x5f, 0x6e, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x05, 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x43, 0x0a, 0x1e, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x1b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x66, 0x0a, + 0x18, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x16, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, + 0x6f, 0x6f, 0x74, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x44, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, + 0x73, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, + 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x22, 0x61, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x73, 0x12, 0x47, 0x0a, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x65, 0x74, 
0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x08, 0x77, 0x72, 0x69, 0x74, 0x65, 0x53, 0x65, 0x74, 0x22, 0x58, 0x0a, 0x14, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x22, 0xd6, 0x01, 0x0a, 0x15, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, + 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x15, + 0x0a, 0x06, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x73, 0x65, 0x71, 0x4e, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x72, + 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x6e, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x64, 0x0a, 0x17, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, - 0x53, 0x74, 0x61, 0x74, 
0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x34, - 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f, 0x70, 0x6c, 0x75, 0x73, 0x5f, 0x70, - 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x50, 0x6c, 0x75, 0x73, 0x50, 0x72, 0x65, 0x63, 0x75, - 0x72, 0x73, 0x6f, 0x72, 0x22, 0x61, 0x0a, 0x16, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x47, - 0x0a, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x72, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x53, 0x65, 0x74, 0x22, 0x58, 0x0a, 0x14, 0x4b, 0x65, 0x79, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x22, 0xd6, 0x01, 0x0a, 0x15, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x15, 0x0a, 0x06, 0x73, - 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 
0x04, 0x52, 0x05, 0x73, 0x65, 0x71, - 0x4e, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x05, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x75, 0x6e, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x12, 0x64, 0x0a, 0x17, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x66, 0x66, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x33, 0x5f, 0x31, 0x2e, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x16, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4f, 0x62, - 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa0, 0x01, 0x0a, 0x10, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x12, - 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x44, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x0d, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x53, 0x65, 0x71, 0x4e, 0x72, 0x12, - 0x1c, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, - 
0x28, 0x0d, 0x52, 0x09, 0x73, 0x75, 0x62, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x72, 0x22, 0x5b, 0x0a, - 0x17, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, 0x68, 0x75, 0x6e, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x62, - 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x62, - 0x6c, 0x6f, 0x62, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x75, - 0x6e, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, - 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x72, 0x0a, 0x18, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, - 0x62, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x68, 0x75, 0x6e, 0x6b, - 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x63, 0x68, - 0x75, 0x6e, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, - 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0x55, - 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x41, 0x76, 0x61, - 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, - 0x62, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x5b, 0x0a, 0x23, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 
0x62, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x09, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, - 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, - 0x65, 0x72, 0x42, 0x11, 0x5a, 0x0f, 0x2e, 0x3b, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x64, 0x4f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x82, 0x01, + 0x0a, 0x10, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, + 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x64, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x22, + 0x0a, 0x0d, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x71, 0x5f, 0x6e, 0x72, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x53, 0x65, 0x71, + 0x4e, 0x72, 0x22, 0x5b, 0x0a, 0x17, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, + 0x62, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 
0x01, + 0x28, 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x62, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1f, + 0x0a, 0x0b, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0a, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x22, + 0x8b, 0x01, 0x0a, 0x18, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x43, + 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, + 0x62, 0x6c, 0x6f, 0x62, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x62, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x0a, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x17, + 0x0a, 0x07, 0x67, 0x6f, 0x5f, 0x61, 0x77, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x06, 0x67, 0x6f, 0x41, 0x77, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0x7c, 0x0a, + 0x18, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x4f, 0x66, 0x66, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, + 0x62, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, + 0x62, 0x6c, 0x6f, 0x62, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x4f, 0x66, 0x66, 0x65, 0x72, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x42, 0x11, 0x5a, 0x0f, 0x2e, + 0x3b, 0x73, 0x65, 0x72, 0x69, 
0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3078,7 +3511,7 @@ func file_offchainreporting3_1_messages_proto_rawDescGZIP() []byte { return file_offchainreporting3_1_messages_proto_rawDescData } -var file_offchainreporting3_1_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 38) +var file_offchainreporting3_1_messages_proto_msgTypes = make([]protoimpl.MessageInfo, 42) var file_offchainreporting3_1_messages_proto_goTypes = []interface{}{ (*MessageWrapper)(nil), // 0: offchainreporting3_1.MessageWrapper (*MessageNewEpochWish)(nil), // 1: offchainreporting3_1.MessageNewEpochWish @@ -3093,31 +3526,35 @@ var file_offchainreporting3_1_messages_proto_goTypes = []interface{}{ (*MessageCertifiedCommitRequest)(nil), // 10: offchainreporting3_1.MessageCertifiedCommitRequest (*MessageCertifiedCommit)(nil), // 11: offchainreporting3_1.MessageCertifiedCommit (*MessageBlockSyncRequest)(nil), // 12: offchainreporting3_1.MessageBlockSyncRequest - (*MessageBlockSync)(nil), // 13: offchainreporting3_1.MessageBlockSync - (*MessageBlockSyncSummary)(nil), // 14: offchainreporting3_1.MessageBlockSyncSummary - (*EpochStartProof)(nil), // 15: offchainreporting3_1.EpochStartProof - (*CertifiedPrepareOrCommit)(nil), // 16: offchainreporting3_1.CertifiedPrepareOrCommit - (*CertifiedPrepare)(nil), // 17: offchainreporting3_1.CertifiedPrepare - (*CertifiedCommit)(nil), // 18: offchainreporting3_1.CertifiedCommit - (*CertifiedCommittedReports)(nil), // 19: offchainreporting3_1.CertifiedCommittedReports - (*HighestCertifiedTimestamp)(nil), // 20: offchainreporting3_1.HighestCertifiedTimestamp - (*AttributedSignedHighestCertifiedTimestamp)(nil), // 21: offchainreporting3_1.AttributedSignedHighestCertifiedTimestamp - (*SignedHighestCertifiedTimestamp)(nil), // 22: offchainreporting3_1.SignedHighestCertifiedTimestamp - (*AttributedObservation)(nil), // 23: offchainreporting3_1.AttributedObservation - 
(*AttributedSignedObservation)(nil), // 24: offchainreporting3_1.AttributedSignedObservation - (*SignedObservation)(nil), // 25: offchainreporting3_1.SignedObservation - (*AttributedPrepareSignature)(nil), // 26: offchainreporting3_1.AttributedPrepareSignature - (*AttributedCommitSignature)(nil), // 27: offchainreporting3_1.AttributedCommitSignature - (*AttestedStateTransitionBlock)(nil), // 28: offchainreporting3_1.AttestedStateTransitionBlock - (*StateTransitionBlock)(nil), // 29: offchainreporting3_1.StateTransitionBlock - (*StateTransitionOutputs)(nil), // 30: offchainreporting3_1.StateTransitionOutputs - (*KeyValueModification)(nil), // 31: offchainreporting3_1.KeyValueModification - (*StateTransitionInputs)(nil), // 32: offchainreporting3_1.StateTransitionInputs - (*MessageBlobOffer)(nil), // 33: offchainreporting3_1.MessageBlobOffer - (*MessageBlobChunkRequest)(nil), // 34: offchainreporting3_1.MessageBlobChunkRequest - (*MessageBlobChunkResponse)(nil), // 35: offchainreporting3_1.MessageBlobChunkResponse - (*MessageBlobAvailable)(nil), // 36: offchainreporting3_1.MessageBlobAvailable - (*AttributedBlobAvailabilitySignature)(nil), // 37: offchainreporting3_1.AttributedBlobAvailabilitySignature + (*MessageBlockSyncResponse)(nil), // 13: offchainreporting3_1.MessageBlockSyncResponse + (*MessageStateSyncSummary)(nil), // 14: offchainreporting3_1.MessageStateSyncSummary + (*MessageTreeSyncChunkRequest)(nil), // 15: offchainreporting3_1.MessageTreeSyncChunkRequest + (*KeyValuePair)(nil), // 16: offchainreporting3_1.KeyValuePair + (*LeafKeyAndValueDigests)(nil), // 17: offchainreporting3_1.LeafKeyAndValueDigests + (*BoundingLeaf)(nil), // 18: offchainreporting3_1.BoundingLeaf + (*MessageTreeSyncChunkResponse)(nil), // 19: offchainreporting3_1.MessageTreeSyncChunkResponse + (*EpochStartProof)(nil), // 20: offchainreporting3_1.EpochStartProof + (*CertifiedPrepareOrCommit)(nil), // 21: offchainreporting3_1.CertifiedPrepareOrCommit + (*CertifiedPrepare)(nil), // 22: 
offchainreporting3_1.CertifiedPrepare + (*CertifiedCommit)(nil), // 23: offchainreporting3_1.CertifiedCommit + (*CertifiedCommittedReports)(nil), // 24: offchainreporting3_1.CertifiedCommittedReports + (*HighestCertifiedTimestamp)(nil), // 25: offchainreporting3_1.HighestCertifiedTimestamp + (*AttributedSignedHighestCertifiedTimestamp)(nil), // 26: offchainreporting3_1.AttributedSignedHighestCertifiedTimestamp + (*SignedHighestCertifiedTimestamp)(nil), // 27: offchainreporting3_1.SignedHighestCertifiedTimestamp + (*AttributedObservation)(nil), // 28: offchainreporting3_1.AttributedObservation + (*AttributedSignedObservation)(nil), // 29: offchainreporting3_1.AttributedSignedObservation + (*SignedObservation)(nil), // 30: offchainreporting3_1.SignedObservation + (*AttributedPrepareSignature)(nil), // 31: offchainreporting3_1.AttributedPrepareSignature + (*AttributedCommitSignature)(nil), // 32: offchainreporting3_1.AttributedCommitSignature + (*AttestedStateTransitionBlock)(nil), // 33: offchainreporting3_1.AttestedStateTransitionBlock + (*StateTransitionBlock)(nil), // 34: offchainreporting3_1.StateTransitionBlock + (*StateTransitionOutputs)(nil), // 35: offchainreporting3_1.StateTransitionOutputs + (*KeyValueModification)(nil), // 36: offchainreporting3_1.KeyValueModification + (*StateTransitionInputs)(nil), // 37: offchainreporting3_1.StateTransitionInputs + (*MessageBlobOffer)(nil), // 38: offchainreporting3_1.MessageBlobOffer + (*MessageBlobChunkRequest)(nil), // 39: offchainreporting3_1.MessageBlobChunkRequest + (*MessageBlobChunkResponse)(nil), // 40: offchainreporting3_1.MessageBlobChunkResponse + (*MessageBlobOfferResponse)(nil), // 41: offchainreporting3_1.MessageBlobOfferResponse } var file_offchainreporting3_1_messages_proto_depIdxs = []int32{ 1, // 0: offchainreporting3_1.MessageWrapper.message_new_epoch_wish:type_name -> offchainreporting3_1.MessageNewEpochWish @@ -3132,41 +3569,46 @@ var file_offchainreporting3_1_messages_proto_depIdxs = []int32{ 10, 
// 9: offchainreporting3_1.MessageWrapper.message_certified_commit_request:type_name -> offchainreporting3_1.MessageCertifiedCommitRequest 11, // 10: offchainreporting3_1.MessageWrapper.message_certified_commit:type_name -> offchainreporting3_1.MessageCertifiedCommit 12, // 11: offchainreporting3_1.MessageWrapper.message_block_sync_request:type_name -> offchainreporting3_1.MessageBlockSyncRequest - 13, // 12: offchainreporting3_1.MessageWrapper.message_block_sync:type_name -> offchainreporting3_1.MessageBlockSync - 14, // 13: offchainreporting3_1.MessageWrapper.message_block_sync_summary:type_name -> offchainreporting3_1.MessageBlockSyncSummary - 33, // 14: offchainreporting3_1.MessageWrapper.message_blob_offer:type_name -> offchainreporting3_1.MessageBlobOffer - 34, // 15: offchainreporting3_1.MessageWrapper.message_blob_chunk_request:type_name -> offchainreporting3_1.MessageBlobChunkRequest - 35, // 16: offchainreporting3_1.MessageWrapper.message_blob_chunk_response:type_name -> offchainreporting3_1.MessageBlobChunkResponse - 36, // 17: offchainreporting3_1.MessageWrapper.message_blob_available:type_name -> offchainreporting3_1.MessageBlobAvailable - 16, // 18: offchainreporting3_1.MessageEpochStartRequest.highest_certified:type_name -> offchainreporting3_1.CertifiedPrepareOrCommit - 22, // 19: offchainreporting3_1.MessageEpochStartRequest.signed_highest_certified_timestamp:type_name -> offchainreporting3_1.SignedHighestCertifiedTimestamp - 15, // 20: offchainreporting3_1.MessageEpochStart.epoch_start_proof:type_name -> offchainreporting3_1.EpochStartProof - 25, // 21: offchainreporting3_1.MessageObservation.signed_observation:type_name -> offchainreporting3_1.SignedObservation - 24, // 22: offchainreporting3_1.MessageProposal.attributed_signed_observations:type_name -> offchainreporting3_1.AttributedSignedObservation - 19, // 23: offchainreporting3_1.MessageCertifiedCommit.certified_committed_reports:type_name -> offchainreporting3_1.CertifiedCommittedReports - 
28, // 24: offchainreporting3_1.MessageBlockSync.attested_state_transition_blocks:type_name -> offchainreporting3_1.AttestedStateTransitionBlock - 16, // 25: offchainreporting3_1.EpochStartProof.highest_certified:type_name -> offchainreporting3_1.CertifiedPrepareOrCommit - 21, // 26: offchainreporting3_1.EpochStartProof.highest_certified_proof:type_name -> offchainreporting3_1.AttributedSignedHighestCertifiedTimestamp - 17, // 27: offchainreporting3_1.CertifiedPrepareOrCommit.prepare:type_name -> offchainreporting3_1.CertifiedPrepare - 18, // 28: offchainreporting3_1.CertifiedPrepareOrCommit.commit:type_name -> offchainreporting3_1.CertifiedCommit - 30, // 29: offchainreporting3_1.CertifiedPrepare.state_transition_outputs:type_name -> offchainreporting3_1.StateTransitionOutputs - 26, // 30: offchainreporting3_1.CertifiedPrepare.prepare_quorum_certificate:type_name -> offchainreporting3_1.AttributedPrepareSignature - 30, // 31: offchainreporting3_1.CertifiedCommit.state_transition_outputs:type_name -> offchainreporting3_1.StateTransitionOutputs - 27, // 32: offchainreporting3_1.CertifiedCommit.commit_quorum_certificate:type_name -> offchainreporting3_1.AttributedCommitSignature - 27, // 33: offchainreporting3_1.CertifiedCommittedReports.commit_quorum_certificate:type_name -> offchainreporting3_1.AttributedCommitSignature - 22, // 34: offchainreporting3_1.AttributedSignedHighestCertifiedTimestamp.signed_highest_certified_timestamp:type_name -> offchainreporting3_1.SignedHighestCertifiedTimestamp - 20, // 35: offchainreporting3_1.SignedHighestCertifiedTimestamp.highest_certified_timestamp:type_name -> offchainreporting3_1.HighestCertifiedTimestamp - 25, // 36: offchainreporting3_1.AttributedSignedObservation.signed_observation:type_name -> offchainreporting3_1.SignedObservation - 29, // 37: offchainreporting3_1.AttestedStateTransitionBlock.state_transition_block:type_name -> offchainreporting3_1.StateTransitionBlock - 27, // 38: 
offchainreporting3_1.AttestedStateTransitionBlock.attributed_signatures:type_name -> offchainreporting3_1.AttributedCommitSignature - 30, // 39: offchainreporting3_1.StateTransitionBlock.state_transition_outputs:type_name -> offchainreporting3_1.StateTransitionOutputs - 31, // 40: offchainreporting3_1.StateTransitionOutputs.write_set:type_name -> offchainreporting3_1.KeyValueModification - 23, // 41: offchainreporting3_1.StateTransitionInputs.attributed_observations:type_name -> offchainreporting3_1.AttributedObservation - 42, // [42:42] is the sub-list for method output_type - 42, // [42:42] is the sub-list for method input_type - 42, // [42:42] is the sub-list for extension type_name - 42, // [42:42] is the sub-list for extension extendee - 0, // [0:42] is the sub-list for field type_name + 13, // 12: offchainreporting3_1.MessageWrapper.message_block_sync_response:type_name -> offchainreporting3_1.MessageBlockSyncResponse + 14, // 13: offchainreporting3_1.MessageWrapper.message_state_sync_summary:type_name -> offchainreporting3_1.MessageStateSyncSummary + 15, // 14: offchainreporting3_1.MessageWrapper.message_tree_sync_chunk_request:type_name -> offchainreporting3_1.MessageTreeSyncChunkRequest + 19, // 15: offchainreporting3_1.MessageWrapper.message_tree_sync_chunk_response:type_name -> offchainreporting3_1.MessageTreeSyncChunkResponse + 38, // 16: offchainreporting3_1.MessageWrapper.message_blob_offer:type_name -> offchainreporting3_1.MessageBlobOffer + 41, // 17: offchainreporting3_1.MessageWrapper.message_blob_offer_response:type_name -> offchainreporting3_1.MessageBlobOfferResponse + 39, // 18: offchainreporting3_1.MessageWrapper.message_blob_chunk_request:type_name -> offchainreporting3_1.MessageBlobChunkRequest + 40, // 19: offchainreporting3_1.MessageWrapper.message_blob_chunk_response:type_name -> offchainreporting3_1.MessageBlobChunkResponse + 21, // 20: offchainreporting3_1.MessageEpochStartRequest.highest_certified:type_name -> 
offchainreporting3_1.CertifiedPrepareOrCommit + 27, // 21: offchainreporting3_1.MessageEpochStartRequest.signed_highest_certified_timestamp:type_name -> offchainreporting3_1.SignedHighestCertifiedTimestamp + 20, // 22: offchainreporting3_1.MessageEpochStart.epoch_start_proof:type_name -> offchainreporting3_1.EpochStartProof + 30, // 23: offchainreporting3_1.MessageObservation.signed_observation:type_name -> offchainreporting3_1.SignedObservation + 29, // 24: offchainreporting3_1.MessageProposal.attributed_signed_observations:type_name -> offchainreporting3_1.AttributedSignedObservation + 24, // 25: offchainreporting3_1.MessageCertifiedCommit.certified_committed_reports:type_name -> offchainreporting3_1.CertifiedCommittedReports + 33, // 26: offchainreporting3_1.MessageBlockSyncResponse.attested_state_transition_blocks:type_name -> offchainreporting3_1.AttestedStateTransitionBlock + 17, // 27: offchainreporting3_1.BoundingLeaf.leaf:type_name -> offchainreporting3_1.LeafKeyAndValueDigests + 16, // 28: offchainreporting3_1.MessageTreeSyncChunkResponse.key_values:type_name -> offchainreporting3_1.KeyValuePair + 18, // 29: offchainreporting3_1.MessageTreeSyncChunkResponse.bounding_leaves:type_name -> offchainreporting3_1.BoundingLeaf + 21, // 30: offchainreporting3_1.EpochStartProof.highest_certified:type_name -> offchainreporting3_1.CertifiedPrepareOrCommit + 26, // 31: offchainreporting3_1.EpochStartProof.highest_certified_proof:type_name -> offchainreporting3_1.AttributedSignedHighestCertifiedTimestamp + 22, // 32: offchainreporting3_1.CertifiedPrepareOrCommit.prepare:type_name -> offchainreporting3_1.CertifiedPrepare + 23, // 33: offchainreporting3_1.CertifiedPrepareOrCommit.commit:type_name -> offchainreporting3_1.CertifiedCommit + 35, // 34: offchainreporting3_1.CertifiedPrepare.state_transition_outputs:type_name -> offchainreporting3_1.StateTransitionOutputs + 31, // 35: offchainreporting3_1.CertifiedPrepare.prepare_quorum_certificate:type_name -> 
offchainreporting3_1.AttributedPrepareSignature + 35, // 36: offchainreporting3_1.CertifiedCommit.state_transition_outputs:type_name -> offchainreporting3_1.StateTransitionOutputs + 32, // 37: offchainreporting3_1.CertifiedCommit.commit_quorum_certificate:type_name -> offchainreporting3_1.AttributedCommitSignature + 32, // 38: offchainreporting3_1.CertifiedCommittedReports.commit_quorum_certificate:type_name -> offchainreporting3_1.AttributedCommitSignature + 27, // 39: offchainreporting3_1.AttributedSignedHighestCertifiedTimestamp.signed_highest_certified_timestamp:type_name -> offchainreporting3_1.SignedHighestCertifiedTimestamp + 25, // 40: offchainreporting3_1.SignedHighestCertifiedTimestamp.highest_certified_timestamp:type_name -> offchainreporting3_1.HighestCertifiedTimestamp + 30, // 41: offchainreporting3_1.AttributedSignedObservation.signed_observation:type_name -> offchainreporting3_1.SignedObservation + 34, // 42: offchainreporting3_1.AttestedStateTransitionBlock.state_transition_block:type_name -> offchainreporting3_1.StateTransitionBlock + 32, // 43: offchainreporting3_1.AttestedStateTransitionBlock.attributed_signatures:type_name -> offchainreporting3_1.AttributedCommitSignature + 35, // 44: offchainreporting3_1.StateTransitionBlock.state_transition_outputs:type_name -> offchainreporting3_1.StateTransitionOutputs + 36, // 45: offchainreporting3_1.StateTransitionOutputs.write_set:type_name -> offchainreporting3_1.KeyValueModification + 28, // 46: offchainreporting3_1.StateTransitionInputs.attributed_observations:type_name -> offchainreporting3_1.AttributedObservation + 47, // [47:47] is the sub-list for method output_type + 47, // [47:47] is the sub-list for method input_type + 47, // [47:47] is the sub-list for extension type_name + 47, // [47:47] is the sub-list for extension extendee + 0, // [0:47] is the sub-list for field type_name } func init() { file_offchainreporting3_1_messages_proto_init() } @@ -3332,7 +3774,7 @@ func 
file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageBlockSync); i { + switch v := v.(*MessageBlockSyncResponse); i { case 0: return &v.state case 1: @@ -3344,7 +3786,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageBlockSyncSummary); i { + switch v := v.(*MessageStateSyncSummary); i { case 0: return &v.state case 1: @@ -3356,7 +3798,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EpochStartProof); i { + switch v := v.(*MessageTreeSyncChunkRequest); i { case 0: return &v.state case 1: @@ -3368,7 +3810,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertifiedPrepareOrCommit); i { + switch v := v.(*KeyValuePair); i { case 0: return &v.state case 1: @@ -3380,7 +3822,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertifiedPrepare); i { + switch v := v.(*LeafKeyAndValueDigests); i { case 0: return &v.state case 1: @@ -3392,7 +3834,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CertifiedCommit); i { + switch v := v.(*BoundingLeaf); i { case 0: return &v.state case 1: @@ -3404,7 +3846,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[19].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*CertifiedCommittedReports); i { + switch v := v.(*MessageTreeSyncChunkResponse); i { case 0: return &v.state case 1: @@ -3416,7 +3858,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HighestCertifiedTimestamp); i { + switch v := v.(*EpochStartProof); i { case 0: return &v.state case 1: @@ -3428,7 +3870,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributedSignedHighestCertifiedTimestamp); i { + switch v := v.(*CertifiedPrepareOrCommit); i { case 0: return &v.state case 1: @@ -3440,7 +3882,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignedHighestCertifiedTimestamp); i { + switch v := v.(*CertifiedPrepare); i { case 0: return &v.state case 1: @@ -3452,7 +3894,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributedObservation); i { + switch v := v.(*CertifiedCommit); i { case 0: return &v.state case 1: @@ -3464,7 +3906,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributedSignedObservation); i { + switch v := v.(*CertifiedCommittedReports); i { case 0: return &v.state case 1: @@ -3476,7 +3918,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignedObservation); i { + switch v := 
v.(*HighestCertifiedTimestamp); i { case 0: return &v.state case 1: @@ -3488,7 +3930,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributedPrepareSignature); i { + switch v := v.(*AttributedSignedHighestCertifiedTimestamp); i { case 0: return &v.state case 1: @@ -3500,7 +3942,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributedCommitSignature); i { + switch v := v.(*SignedHighestCertifiedTimestamp); i { case 0: return &v.state case 1: @@ -3512,7 +3954,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttestedStateTransitionBlock); i { + switch v := v.(*AttributedObservation); i { case 0: return &v.state case 1: @@ -3524,7 +3966,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateTransitionBlock); i { + switch v := v.(*AttributedSignedObservation); i { case 0: return &v.state case 1: @@ -3536,7 +3978,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateTransitionOutputs); i { + switch v := v.(*SignedObservation); i { case 0: return &v.state case 1: @@ -3548,7 +3990,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*KeyValueModification); i { + switch v := v.(*AttributedPrepareSignature); i { case 0: return &v.state case 1: @@ -3560,7 
+4002,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateTransitionInputs); i { + switch v := v.(*AttributedCommitSignature); i { case 0: return &v.state case 1: @@ -3572,7 +4014,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageBlobOffer); i { + switch v := v.(*AttestedStateTransitionBlock); i { case 0: return &v.state case 1: @@ -3584,7 +4026,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageBlobChunkRequest); i { + switch v := v.(*StateTransitionBlock); i { case 0: return &v.state case 1: @@ -3596,7 +4038,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageBlobChunkResponse); i { + switch v := v.(*StateTransitionOutputs); i { case 0: return &v.state case 1: @@ -3608,7 +4050,7 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MessageBlobAvailable); i { + switch v := v.(*KeyValueModification); i { case 0: return &v.state case 1: @@ -3620,7 +4062,55 @@ func file_offchainreporting3_1_messages_proto_init() { } } file_offchainreporting3_1_messages_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributedBlobAvailabilitySignature); i { + switch v := v.(*StateTransitionInputs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_offchainreporting3_1_messages_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageBlobOffer); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_messages_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageBlobChunkRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_messages_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageBlobChunkResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_offchainreporting3_1_messages_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageBlobOfferResponse); i { case 0: return &v.state case 1: @@ -3645,14 +4135,16 @@ func file_offchainreporting3_1_messages_proto_init() { (*MessageWrapper_MessageCertifiedCommitRequest)(nil), (*MessageWrapper_MessageCertifiedCommit)(nil), (*MessageWrapper_MessageBlockSyncRequest)(nil), - (*MessageWrapper_MessageBlockSync)(nil), - (*MessageWrapper_MessageBlockSyncSummary)(nil), + (*MessageWrapper_MessageBlockSyncResponse)(nil), + (*MessageWrapper_MessageStateSyncSummary)(nil), + (*MessageWrapper_MessageTreeSyncChunkRequest)(nil), + (*MessageWrapper_MessageTreeSyncChunkResponse)(nil), (*MessageWrapper_MessageBlobOffer)(nil), + (*MessageWrapper_MessageBlobOfferResponse)(nil), (*MessageWrapper_MessageBlobChunkRequest)(nil), (*MessageWrapper_MessageBlobChunkResponse)(nil), - (*MessageWrapper_MessageBlobAvailable)(nil), } - file_offchainreporting3_1_messages_proto_msgTypes[16].OneofWrappers = []interface{}{ + file_offchainreporting3_1_messages_proto_msgTypes[21].OneofWrappers = []interface{}{ 
(*CertifiedPrepareOrCommit_Prepare)(nil), (*CertifiedPrepareOrCommit_Commit)(nil), } @@ -3662,7 +4154,7 @@ func file_offchainreporting3_1_messages_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_offchainreporting3_1_messages_proto_rawDesc, NumEnums: 0, - NumMessages: 38, + NumMessages: 42, NumExtensions: 0, NumServices: 0, }, diff --git a/offchainreporting2plus/internal/ocr3_1/serialization/serialization.go b/offchainreporting2plus/internal/ocr3_1/serialization/serialization.go index 43d964ad..ce53ef82 100644 --- a/offchainreporting2plus/internal/ocr3_1/serialization/serialization.go +++ b/offchainreporting2plus/internal/ocr3_1/serialization/serialization.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/jmt" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" @@ -35,6 +36,22 @@ func SerializeCertifiedPrepareOrCommit(cpoc protocol.CertifiedPrepareOrCommit) ( return proto.Marshal(tpm.certifiedPrepareOrCommit(cpoc)) } +func SerializeTreeSyncStatus(m protocol.TreeSyncStatus) ([]byte, error) { + tpm := toProtoMessage[struct{}]{} + pb := TreeSyncStatus{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + TreeSyncPhase(m.Phase), + m.TargetSeqNr, + m.TargetStateRootDigest[:], + tpm.pendingKeyDigestRanges(m.PendingKeyDigestRanges), + } + return proto.Marshal(&pb) +} + func SerializePacemakerState(m protocol.PacemakerState) ([]byte, error) { pb := PacemakerState{ // zero-initialize protobuf built-ins @@ -49,14 +66,16 @@ func SerializePacemakerState(m protocol.PacemakerState) ([]byte, error) { return proto.Marshal(&pb) } -func SerializeStatePersistenceState(m protocol.StatePersistenceState) ([]byte, error) { - pb := StatePersistenceState{ +func SerializeBlobMeta(m protocol.BlobMeta) ([]byte, error) { + pb := BlobMeta{ // 
zero-initialize protobuf built-ins protoimpl.MessageState{}, 0, nil, // fields - m.HighestPersistedStateTransitionBlockSeqNr, + m.PayloadLength, + m.ChunksHave, + m.ExpirySeqNr, } return proto.Marshal(&pb) } @@ -97,6 +116,29 @@ func DeserializeTrustedPrepareOrCommit(b []byte) (protocol.CertifiedPrepareOrCom return fpm.certifiedPrepareOrCommit(&pb) } +func DeserializeTreeSyncStatus(b []byte) (protocol.TreeSyncStatus, error) { + pb := TreeSyncStatus{} + if err := proto.Unmarshal(b, &pb); err != nil { + return protocol.TreeSyncStatus{}, err + } + var stateRootDigest protocol.StateRootDigest + if len(stateRootDigest) < len(pb.TargetStateRootDigest) { + return protocol.TreeSyncStatus{}, fmt.Errorf("invalid state root digest length expected at most %d, got %d", len(stateRootDigest), len(pb.TargetStateRootDigest)) + } + copy(stateRootDigest[:], pb.TargetStateRootDigest) + fpm := fromProtoMessage[struct{}]{} + pkdr, err := fpm.pendingKeyDigestRanges(pb.PendingKeyDigestRanges) + if err != nil { + return protocol.TreeSyncStatus{}, err + } + return protocol.TreeSyncStatus{ + protocol.TreeSyncPhase(pb.Phase), + pb.TargetSeqNr, + stateRootDigest, + pkdr, + }, nil +} + func DeserializePacemakerState(b []byte) (protocol.PacemakerState, error) { pb := PacemakerState{} if err := proto.Unmarshal(b, &pb); err != nil { @@ -109,13 +151,15 @@ func DeserializePacemakerState(b []byte) (protocol.PacemakerState, error) { }, nil } -func DeserializeStatePersistenceState(b []byte) (protocol.StatePersistenceState, error) { - pb := StatePersistenceState{} +func DeserializeBlobMeta(b []byte) (protocol.BlobMeta, error) { + pb := BlobMeta{} if err := proto.Unmarshal(b, &pb); err != nil { - return protocol.StatePersistenceState{}, err + return protocol.BlobMeta{}, err } - return protocol.StatePersistenceState{ - pb.HighestPersistedStateTransitionBlockSeqNr, + return protocol.BlobMeta{ + pb.PayloadLength, + pb.ChunkHaves, + pb.ExpirySeqNr, }, nil } @@ -277,36 +321,65 @@ func (tpm *toProtoMessage[RI]) 
messageWrapper(m protocol.Message[RI]) (*MessageW 0, nil, // fields - v.HighestCommittedSeqNr, - v.Nonce, + v.StartSeqNr, + v.EndExclSeqNr, } msgWrapper.Msg = &MessageWrapper_MessageBlockSyncRequest{pm} - case protocol.MessageBlockSync[RI]: + case protocol.MessageBlockSyncResponse[RI]: astbs := make([]*AttestedStateTransitionBlock, 0, len(v.AttestedStateTransitionBlocks)) for _, astb := range v.AttestedStateTransitionBlocks { astbs = append(astbs, tpm.attestedStateTransitionBlock(astb)) } - pm := &MessageBlockSync{ + pm := &MessageBlockSyncResponse{ // zero-initialize protobuf built-ins protoimpl.MessageState{}, 0, nil, // fields + v.RequestStartSeqNr, + v.RequestEndExclSeqNr, astbs, - v.Nonce, } - msgWrapper.Msg = &MessageWrapper_MessageBlockSync{pm} - case protocol.MessageBlockSyncSummary[RI]: - pm := &MessageBlockSyncSummary{ + msgWrapper.Msg = &MessageWrapper_MessageBlockSyncResponse{pm} + case protocol.MessageStateSyncSummary[RI]: + pm := &MessageStateSyncSummary{ // zero-initialize protobuf built-ins protoimpl.MessageState{}, 0, nil, // fields v.LowestPersistedSeqNr, + v.HighestCommittedSeqNr, } - msgWrapper.Msg = &MessageWrapper_MessageBlockSyncSummary{pm} - + msgWrapper.Msg = &MessageWrapper_MessageStateSyncSummary{pm} + case protocol.MessageTreeSyncChunkRequest[RI]: + pm := &MessageTreeSyncChunkRequest{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + v.ToSeqNr, + v.StartIndex[:], + v.EndInclIndex[:], + } + msgWrapper.Msg = &MessageWrapper_MessageTreeSyncChunkRequest{pm} + case protocol.MessageTreeSyncChunkResponse[RI]: + pm := &MessageTreeSyncChunkResponse{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + v.ToSeqNr, + v.StartIndex[:], + v.RequestEndInclIndex[:], + v.GoAway, + v.EndInclIndex[:], + tpm.treeSyncChunkKeyValues(v.KeyValues), + tpm.treeSyncChunkBoundingLeaves(v.BoundingLeaves), + } + msgWrapper.Msg = &MessageWrapper_MessageTreeSyncChunkResponse{pm} case 
protocol.MessageBlobOffer[RI]: pm := &MessageBlobOffer{ // zero-initialize protobuf built-ins @@ -317,9 +390,20 @@ func (tpm *toProtoMessage[RI]) messageWrapper(m protocol.Message[RI]) (*MessageW tpm.chunkDigests(v.ChunkDigests), v.PayloadLength, v.ExpirySeqNr, - uint32(v.Submitter), } msgWrapper.Msg = &MessageWrapper_MessageBlobOffer{pm} + case protocol.MessageBlobOfferResponse[RI]: + pm := &MessageBlobOfferResponse{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + v.BlobDigest[:], + v.RejectOffer, + v.Signature[:], + } + msgWrapper.Msg = &MessageWrapper_MessageBlobOfferResponse{pm} case protocol.MessageBlobChunkRequest[RI]: pm := &MessageBlobChunkRequest{ // zero-initialize protobuf built-ins @@ -340,21 +424,10 @@ func (tpm *toProtoMessage[RI]) messageWrapper(m protocol.Message[RI]) (*MessageW // fields v.BlobDigest[:], v.ChunkIndex, + v.GoAway, v.Chunk, } msgWrapper.Msg = &MessageWrapper_MessageBlobChunkResponse{pm} - case protocol.MessageBlobAvailable[RI]: - pm := &MessageBlobAvailable{ - // zero-initialize protobuf built-ins - protoimpl.MessageState{}, - 0, - nil, - // fields - v.BlobDigest[:], - v.Signature[:], - } - msgWrapper.Msg = &MessageWrapper_MessageBlobAvailable{pm} - default: return nil, fmt.Errorf("unable to serialize message of type %T", m) @@ -401,6 +474,7 @@ func (tpm *toProtoMessage[RI]) certifiedPrepareOrCommit(cpoc protocol.CertifiedP v.SeqNr(), v.StateTransitionInputsDigest[:], tpm.stateTransitionOutputs(v.StateTransitionOutputs), + v.StateRootDigest[:], v.ReportsPlusPrecursor[:], prepareQuorumCertificate, }}, @@ -444,6 +518,7 @@ func (tpm *toProtoMessage[RI]) CertifiedCommit(cpocc protocol.CertifiedCommit) * cpocc.SeqNr(), cpocc.StateTransitionInputsDigest[:], tpm.stateTransitionOutputs(cpocc.StateTransitionOutputs), + cpocc.StateRootDigest[:], cpocc.ReportsPlusPrecursor[:], commitQuorumCertificate, } @@ -472,6 +547,7 @@ func (tpm *toProtoMessage[RI]) CertifiedCommittedReports(ccr 
protocol.CertifiedC ccr.SeqNr, ccr.StateTransitionInputsDigest[:], ccr.StateTransitionOutputDigest[:], + ccr.StateRootDigest[:], ccr.ReportsPlusPrecursor[:], commitQuorumCertificate, } @@ -555,8 +631,8 @@ func (tpm *toProtoMessage[RI]) attributedSignedObservation(aso protocol.Attribut } func (tpm *toProtoMessage[RI]) attestedStateTransitionBlock(astb protocol.AttestedStateTransitionBlock) *AttestedStateTransitionBlock { - attributedSignatures := make([]*AttributedCommitSignature, 0, len(astb.AttributedSignatures)) - for _, as := range astb.AttributedSignatures { + attributedSignatures := make([]*AttributedCommitSignature, 0, len(astb.AttributedCommitSignatures)) + for _, as := range astb.AttributedCommitSignatures { attributedSignatures = append(attributedSignatures, &AttributedCommitSignature{ // zero-initialize protobuf built-ins protoimpl.MessageState{}, @@ -589,6 +665,7 @@ func (tpm *toProtoMessage[RI]) stateTransitionBlock(stb protocol.StateTransition stb.SeqNr(), stb.StateTransitionInputsDigest[:], tpm.stateTransitionOutputs(stb.StateTransitionOutputs), + stb.StateRootDigest[:], stb.ReportsPlusPrecursor, } } @@ -617,6 +694,75 @@ func (tpm *toProtoMessage[RI]) stateTransitionOutputs(sto protocol.StateTransiti } } +func (tpm *toProtoMessage[RI]) treeSyncChunkProof(proof []jmt.Digest) [][]byte { + pns := make([][]byte, 0, len(proof)) + for _, pn := range proof { + pns = append(pns, pn[:]) + } + return pns +} + +func (tpm *toProtoMessage[RI]) treeSyncChunkBoundingLeaves(boundingLeaves []jmt.BoundingLeaf) []*BoundingLeaf { + pbbls := make([]*BoundingLeaf, 0, len(boundingLeaves)) + for _, bl := range boundingLeaves { + pbbls = append(pbbls, &BoundingLeaf{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + tpm.leafKeyAndValueDigests(bl.Leaf), + tpm.treeSyncChunkProof(bl.Siblings), + }) + } + return pbbls +} + +func (tpm *toProtoMessage[RI]) leafKeyAndValueDigests(leafKeyAndValueDigests jmt.LeafKeyAndValueDigests) 
*LeafKeyAndValueDigests { + return &LeafKeyAndValueDigests{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + leafKeyAndValueDigests.KeyDigest[:], + leafKeyAndValueDigests.ValueDigest[:], + } +} + +func (tpm *toProtoMessage[RI]) treeSyncChunkKeyValues(kvps []protocol.KeyValuePair) []*KeyValuePair { + pbkvps := make([]*KeyValuePair, 0, len(kvps)) + for _, kvp := range kvps { + pbkvps = append(pbkvps, &KeyValuePair{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + kvp.Key, + kvp.Value, + }) + } + return pbkvps +} + +func (tpm *toProtoMessage[RI]) pendingKeyDigestRanges(pkdr protocol.PendingKeyDigestRanges) []*KeyDigestRange { + allRanges := pkdr.All() + pbRanges := make([]*KeyDigestRange, 0, len(allRanges)) + for _, r := range allRanges { + pbRanges = append(pbRanges, &KeyDigestRange{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + r.StartIndex[:], + r.EndInclIndex[:], + }) + } + return pbRanges +} + // // *fromProtoMessage // @@ -649,22 +795,25 @@ func (fpm *fromProtoMessage[RI]) messageWrapper(wrapper *MessageWrapper) (protoc case *MessageWrapper_MessageCertifiedCommitRequest: return fpm.messageCertifiedCommitRequest(wrapper.GetMessageCertifiedCommitRequest()) case *MessageWrapper_MessageCertifiedCommit: - return fpm.messageCertifiedCommit(wrapper.GetMessageCertifiedCommit()) + return fpm.MessageCertifiedCommit(wrapper.GetMessageCertifiedCommit()) case *MessageWrapper_MessageBlockSyncRequest: return fpm.messageBlockSyncRequest(wrapper.GetMessageBlockSyncRequest()) - case *MessageWrapper_MessageBlockSync: - return fpm.messageBlockSync(wrapper.GetMessageBlockSync()) - case *MessageWrapper_MessageBlockSyncSummary: - return fpm.messageBlockSyncSummary(wrapper.GetMessageBlockSyncSummary()) - + case *MessageWrapper_MessageBlockSyncResponse: + return fpm.messageBlockSyncResponse(wrapper.GetMessageBlockSyncResponse()) + case 
*MessageWrapper_MessageStateSyncSummary: + return fpm.messageStateSyncSummary(wrapper.GetMessageStateSyncSummary()) + case *MessageWrapper_MessageTreeSyncChunkRequest: + return fpm.messageTreeSyncChunkRequest(wrapper.GetMessageTreeSyncChunkRequest()) + case *MessageWrapper_MessageTreeSyncChunkResponse: + return fpm.messageTreeSyncChunkResponse(wrapper.GetMessageTreeSyncChunkResponse()) case *MessageWrapper_MessageBlobOffer: return fpm.messageBlobOffer(wrapper.GetMessageBlobOffer()) + case *MessageWrapper_MessageBlobOfferResponse: + return fpm.messageBlobOfferResponse(wrapper.GetMessageBlobOfferResponse()) case *MessageWrapper_MessageBlobChunkRequest: return fpm.messageBlobChunkRequest(wrapper.GetMessageBlobChunkRequest()) case *MessageWrapper_MessageBlobChunkResponse: return fpm.messageBlobChunkResponse(wrapper.GetMessageBlobChunkResponse()) - case *MessageWrapper_MessageBlobAvailable: - return fpm.messageBlobAvailable(wrapper.GetMessageBlobAvailable()) default: return nil, fmt.Errorf("unrecognized Msg type %T", msg) @@ -782,6 +931,8 @@ func (fpm *fromProtoMessage[RI]) certifiedPrepare(m *CertifiedPrepare) (protocol } var inputsDigest protocol.StateTransitionInputsDigest copy(inputsDigest[:], m.StateTransitionInputsDigest) + var stateRootDigest protocol.StateRootDigest + copy(stateRootDigest[:], m.StateRootDigest) prepareQuorumCertificate := make([]protocol.AttributedPrepareSignature, 0, len(m.PrepareQuorumCertificate)) for _, aps := range m.PrepareQuorumCertificate { @@ -799,6 +950,7 @@ func (fpm *fromProtoMessage[RI]) certifiedPrepare(m *CertifiedPrepare) (protocol m.SeqNr, inputsDigest, outputs, + stateRootDigest, m.ReportsPlusPrecursor, prepareQuorumCertificate, }, nil @@ -815,6 +967,8 @@ func (fpm *fromProtoMessage[RI]) certifiedCommit(m *CertifiedCommit) (protocol.C } var inputsDigest protocol.StateTransitionInputsDigest copy(inputsDigest[:], m.StateTransitionInputsDigest) + var stateRootDigest protocol.StateRootDigest + copy(stateRootDigest[:], 
m.StateRootDigest) commitQuorumCertificate := make([]protocol.AttributedCommitSignature, 0, len(m.CommitQuorumCertificate)) for _, aps := range m.CommitQuorumCertificate { @@ -832,6 +986,7 @@ func (fpm *fromProtoMessage[RI]) certifiedCommit(m *CertifiedCommit) (protocol.C m.SeqNr, inputsDigest, outputs, + stateRootDigest, m.ReportsPlusPrecursor, commitQuorumCertificate, }, nil @@ -845,6 +1000,8 @@ func (fpm *fromProtoMessage[RI]) certifiedCommittedReports(m *CertifiedCommitted copy(inputsDigest[:], m.StateTransitionInputsDigest) var outputsDigest protocol.StateTransitionOutputDigest copy(outputsDigest[:], m.StateTransitionOutputDigest) + var stateRootDigest protocol.StateRootDigest + copy(stateRootDigest[:], m.StateRootDigest) commitQuorumCertificate := make([]protocol.AttributedCommitSignature, 0, len(m.CommitQuorumCertificate)) for _, aps := range m.CommitQuorumCertificate { @@ -862,6 +1019,7 @@ func (fpm *fromProtoMessage[RI]) certifiedCommittedReports(m *CertifiedCommitted m.SeqNr, inputsDigest, outputsDigest, + stateRootDigest, m.ReportsPlusPrecursor, commitQuorumCertificate, }, nil @@ -970,7 +1128,7 @@ func (fpm *fromProtoMessage[RI]) messageCertifiedCommitRequest(m *MessageCertifi }, nil } -func (fpm *fromProtoMessage[RI]) messageCertifiedCommit(m *MessageCertifiedCommit) (protocol.MessageCertifiedCommit[RI], error) { +func (fpm *fromProtoMessage[RI]) MessageCertifiedCommit(m *MessageCertifiedCommit) (protocol.MessageCertifiedCommit[RI], error) { if m == nil { return protocol.MessageCertifiedCommit[RI]{}, fmt.Errorf("unable to extract a MessageCertifiedCommit value") } @@ -1041,32 +1199,34 @@ func (fpm *fromProtoMessage[RI]) messageBlockSyncRequest(m *MessageBlockSyncRequ } return protocol.MessageBlockSyncRequest[RI]{ fpm.requestHandle, - m.HighestCommittedSeqNr, - m.Nonce, + m.StartSeqNr, + m.EndExclSeqNr, }, nil } -func (fpm *fromProtoMessage[RI]) messageBlockSync(m *MessageBlockSync) (protocol.MessageBlockSync[RI], error) { +func (fpm 
*fromProtoMessage[RI]) messageBlockSyncResponse(m *MessageBlockSyncResponse) (protocol.MessageBlockSyncResponse[RI], error) { if m == nil { - return protocol.MessageBlockSync[RI]{}, fmt.Errorf("unable to extract a MessageBlockSync value") + return protocol.MessageBlockSyncResponse[RI]{}, fmt.Errorf("unable to extract a MessageBlockSyncResponse value") } astbs, err := fpm.attestedStateTransitionBlocks(m.AttestedStateTransitionBlocks) if err != nil { - return protocol.MessageBlockSync[RI]{}, err + return protocol.MessageBlockSyncResponse[RI]{}, err } - return protocol.MessageBlockSync[RI]{ + return protocol.MessageBlockSyncResponse[RI]{ nil, // TODO: consider using a sentinel value here, e.g. "EmptyRequestHandleForInboundResponse" + m.RequestStartSeqNr, + m.RequestEndExclSeqNr, astbs, - m.Nonce, }, nil } -func (fpm *fromProtoMessage[RI]) messageBlockSyncSummary(m *MessageBlockSyncSummary) (protocol.MessageBlockSyncSummary[RI], error) { +func (fpm *fromProtoMessage[RI]) messageStateSyncSummary(m *MessageStateSyncSummary) (protocol.MessageStateSyncSummary[RI], error) { if m == nil { - return protocol.MessageBlockSyncSummary[RI]{}, fmt.Errorf("unable to extract a MessageBlockSyncSummary value") + return protocol.MessageStateSyncSummary[RI]{}, fmt.Errorf("unable to extract a MessageStateSyncSummary value") } - return protocol.MessageBlockSyncSummary[RI]{ + return protocol.MessageStateSyncSummary[RI]{ m.LowestPersistedSeqNr, + m.HighestCommittedSeqNr, }, nil } func (fpm *fromProtoMessage[RI]) attestedStateTransitionBlocks(pbastbs []*AttestedStateTransitionBlock) ([]protocol.AttestedStateTransitionBlock, error) { @@ -1105,6 +1265,8 @@ func (fpm *fromProtoMessage[RI]) stateTransitionBlock(m *StateTransitionBlock) ( } var inputsDigest protocol.StateTransitionInputsDigest copy(inputsDigest[:], m.StateTransitionInputsDigest) + var stateRootDigest protocol.StateRootDigest + copy(stateRootDigest[:], m.StateRootDigest) outputs, err := 
fpm.stateTransitionOutputs(m.StateTransitionOutputs) if err != nil { @@ -1115,6 +1277,7 @@ func (fpm *fromProtoMessage[RI]) stateTransitionBlock(m *StateTransitionBlock) ( m.SeqNr, inputsDigest, outputs, + stateRootDigest, m.ReportsPlusPrecursor, }, nil } @@ -1150,9 +1313,9 @@ func (fpm *fromProtoMessage[RI]) stateTransitionOutputs(m *StateTransitionOutput return protocol.StateTransitionOutputs{}, fmt.Errorf("unable to extract an StateTransitionOutputs value") } - writeSet := make([]protocol.KeyValuePair, 0, len(m.WriteSet)) + writeSet := make([]protocol.KeyValuePairWithDeletions, 0, len(m.WriteSet)) for _, pbkvmod := range m.WriteSet { - writeSet = append(writeSet, protocol.KeyValuePair{ + writeSet = append(writeSet, protocol.KeyValuePairWithDeletions{ pbkvmod.Key, pbkvmod.Value, pbkvmod.Deleted, @@ -1170,22 +1333,16 @@ func (fpm *fromProtoMessage[RI]) messageBlobOffer(m *MessageBlobOffer) (protocol if err != nil { return protocol.MessageBlobOffer[RI]{}, err } - submitter, err := fpm.oracleID(m.Submitter) - if err != nil { - return protocol.MessageBlobOffer[RI]{}, err - } return protocol.MessageBlobOffer[RI]{ + fpm.requestHandle, + nil, chunkDigests, m.PayloadLength, m.ExpirySeqNr, - submitter, }, nil } func (fpm *fromProtoMessage[RI]) chunkDigests(pbcds [][]byte) ([]protocol.BlobChunkDigest, error) { - if pbcds == nil { - return nil, fmt.Errorf("unable to extract a ChunkDigests value") - } cds := make([]protocol.BlobChunkDigest, 0, len(pbcds)) for _, pbcd := range pbcds { var blockChunkDigest protocol.BlobChunkDigest @@ -1196,6 +1353,22 @@ func (fpm *fromProtoMessage[RI]) chunkDigests(pbcds [][]byte) ([]protocol.BlobCh return cds, nil } +func (fpm *fromProtoMessage[RI]) messageBlobOfferResponse(m *MessageBlobOfferResponse) (protocol.MessageBlobOfferResponse[RI], error) { + if m == nil { + return protocol.MessageBlobOfferResponse[RI]{}, fmt.Errorf("unable to extract a MessageBlobOfferResponse value") + } + + var blobDigest protocol.BlobDigest + copy(blobDigest[:], 
m.BlobDigest) + + return protocol.MessageBlobOfferResponse[RI]{ + nil, // TODO: consider using a sentinel value here, e.g. "EmptyRequestHandleForInboundResponse" + blobDigest, + m.RejectOffer, + m.Signature, + }, nil +} + func (fpm *fromProtoMessage[RI]) messageBlobChunkRequest(m *MessageBlobChunkRequest) (protocol.MessageBlobChunkRequest[RI], error) { if m == nil { return protocol.MessageBlobChunkRequest[RI]{}, fmt.Errorf("unable to extract a MessageBlobChunkRequest value") @@ -1206,6 +1379,7 @@ func (fpm *fromProtoMessage[RI]) messageBlobChunkRequest(m *MessageBlobChunkRequ return protocol.MessageBlobChunkRequest[RI]{ fpm.requestHandle, + nil, blobDigest, m.ChunkIndex, }, nil @@ -1223,20 +1397,146 @@ func (fpm *fromProtoMessage[RI]) messageBlobChunkResponse(m *MessageBlobChunkRes nil, // TODO: consider using a sentinel value here, e.g. "EmptyRequestHandleForInboundResponse" blobDigest, m.ChunkIndex, + m.GoAway, m.Chunk, }, nil } -func (fpm *fromProtoMessage[RI]) messageBlobAvailable(m *MessageBlobAvailable) (protocol.MessageBlobAvailable[RI], error) { +func (fpm *fromProtoMessage[RI]) messageTreeSyncChunkRequest(m *MessageTreeSyncChunkRequest) (protocol.MessageTreeSyncChunkRequest[RI], error) { if m == nil { - return protocol.MessageBlobAvailable[RI]{}, fmt.Errorf("unable to extract a MessageBlobAvailable value") + return protocol.MessageTreeSyncChunkRequest[RI]{}, fmt.Errorf("unable to extract an MessageTreeSyncRequest value") } + startIndex, err := fpm.digest(m.StartIndex) + if err != nil { + return protocol.MessageTreeSyncChunkRequest[RI]{}, err + } + endInclIndex, err := fpm.digest(m.EndInclIndex) + if err != nil { + return protocol.MessageTreeSyncChunkRequest[RI]{}, err + } + return protocol.MessageTreeSyncChunkRequest[RI]{ + fpm.requestHandle, + m.ToSeqNr, + startIndex, + endInclIndex, + }, nil +} - var blobDigest protocol.BlobDigest - copy(blobDigest[:], m.BlobDigest) +func (fpm *fromProtoMessage[RI]) treeSyncChunkProof(pbpns [][]byte) ([]jmt.Digest, 
error) { + proof := make([]jmt.Digest, 0, len(pbpns)) + for _, pbpn := range pbpns { + var pn jmt.Digest + if len(pbpn) != len(pn) { + return proof, fmt.Errorf("invalid proof node length, expected %d, got %d ", len(pn), len(pbpn)) + } + copy(pn[:], pbpn) + proof = append(proof, pn) + } + return proof, nil +} - return protocol.MessageBlobAvailable[RI]{ - blobDigest, - m.Signature, +func (fpm *fromProtoMessage[RI]) treeSyncChunkKeyValues(pbkvps []*KeyValuePair) ([]protocol.KeyValuePair, error) { + kvps := make([]protocol.KeyValuePair, 0, len(pbkvps)) + for _, pbkvp := range pbkvps { + kvps = append(kvps, protocol.KeyValuePair{ + pbkvp.Key, + pbkvp.Value, + }) + } + return kvps, nil +} + +func (fpm *fromProtoMessage[RI]) digest(pbdigest []byte) (jmt.Digest, error) { + if len(pbdigest) != len(jmt.Digest{}) { + return jmt.Digest{}, fmt.Errorf("digest must be %d bytes, got %d", len(jmt.Digest{}), len(pbdigest)) + } + var digest jmt.Digest + copy(digest[:], pbdigest) + return digest, nil +} + +func (fpm *fromProtoMessage[RI]) leafKeyAndValueDigests(pblkd *LeafKeyAndValueDigests) (jmt.LeafKeyAndValueDigests, error) { + keyDigest, err := fpm.digest(pblkd.KeyDigest) + if err != nil { + return jmt.LeafKeyAndValueDigests{}, err + } + valueDigest, err := fpm.digest(pblkd.ValueDigest) + if err != nil { + return jmt.LeafKeyAndValueDigests{}, err + } + return jmt.LeafKeyAndValueDigests{ + keyDigest, + valueDigest, + }, nil +} + +func (fpm *fromProtoMessage[RI]) treeSyncChunkBoundingLeaves(pbbls []*BoundingLeaf) ([]jmt.BoundingLeaf, error) { + boundingLeaves := make([]jmt.BoundingLeaf, 0, len(pbbls)) + for _, pbbbl := range pbbls { + leafKeyAndValueDigests, err := fpm.leafKeyAndValueDigests(pbbbl.Leaf) + if err != nil { + return nil, err + } + siblings, err := fpm.treeSyncChunkProof(pbbbl.Siblings) + if err != nil { + return nil, err + } + boundingLeaves = append(boundingLeaves, jmt.BoundingLeaf{ + leafKeyAndValueDigests, + siblings, + }) + } + return boundingLeaves, nil +} + +func 
(fpm *fromProtoMessage[RI]) messageTreeSyncChunkResponse(m *MessageTreeSyncChunkResponse) (protocol.MessageTreeSyncChunkResponse[RI], error) { + if m == nil { + return protocol.MessageTreeSyncChunkResponse[RI]{}, fmt.Errorf("unable to extract a MessageTreeSyncChunkResponse value") + } + startIndex, err := fpm.digest(m.StartIndex) + if err != nil { + return protocol.MessageTreeSyncChunkResponse[RI]{}, err + } + requestEndInclIndex, err := fpm.digest(m.RequestEndInclIndex) + if err != nil { + return protocol.MessageTreeSyncChunkResponse[RI]{}, err + } + endInclIndex, err := fpm.digest(m.EndInclIndex) + if err != nil { + return protocol.MessageTreeSyncChunkResponse[RI]{}, err + } + keyValues, err := fpm.treeSyncChunkKeyValues(m.KeyValues) + if err != nil { + return protocol.MessageTreeSyncChunkResponse[RI]{}, err + } + boundingLeaves, err := fpm.treeSyncChunkBoundingLeaves(m.BoundingLeaves) + if err != nil { + return protocol.MessageTreeSyncChunkResponse[RI]{}, err + } + return protocol.MessageTreeSyncChunkResponse[RI]{ + nil, // TODO: consider using a sentinel value here, e.g. 
"EmptyRequestHandleForInboundResponse" + m.ToSeqNr, + startIndex, + requestEndInclIndex, + m.GoAway, + endInclIndex, + keyValues, + boundingLeaves, }, nil } + +func (fpm *fromProtoMessage[RI]) pendingKeyDigestRanges(pbRanges []*KeyDigestRange) (protocol.PendingKeyDigestRanges, error) { + ranges := make([]protocol.KeyDigestRange, 0, len(pbRanges)) + for _, pbr := range pbRanges { + startIndex, err := fpm.digest(pbr.StartIndex) + if err != nil { + return protocol.PendingKeyDigestRanges{}, err + } + endInclIndex, err := fpm.digest(pbr.EndInclIndex) + if err != nil { + return protocol.PendingKeyDigestRanges{}, err + } + ranges = append(ranges, protocol.KeyDigestRange{startIndex, endInclIndex}) + } + return protocol.NewPendingKeyDigestRanges(ranges), nil +} diff --git a/offchainreporting2plus/internal/ocr3_1/serialization/serialization_jmt.go b/offchainreporting2plus/internal/ocr3_1/serialization/serialization_jmt.go new file mode 100644 index 00000000..5e455d44 --- /dev/null +++ b/offchainreporting2plus/internal/ocr3_1/serialization/serialization_jmt.go @@ -0,0 +1,204 @@ +package serialization + +import ( + "encoding/binary" + "fmt" + + "github.com/smartcontractkit/libocr/internal/jmt" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// SerializeJmtNode converts a jmt.Node to bytes +func SerializeJmtNode(node jmt.Node) ([]byte, error) { + if node == nil { + return nil, fmt.Errorf("cannot serialize nil jmt node") + } + + pb := &Node{} + switch n := node.(type) { + case *jmt.InternalNode: + internalNode, err := serializeInternalNode(n) + if err != nil { + return nil, fmt.Errorf("failed to serialize internal node: %w", err) + } + pb.Node = &Node_InternalNode{internalNode} + case *jmt.LeafNode: + leafNode, err := serializeLeafNode(n) + if err != nil { + return nil, fmt.Errorf("failed to serialize leaf node: %w", err) + } + pb.Node = &Node_LeafNode{leafNode} + default: + return nil, fmt.Errorf("unknown jmt node type: %T", node) + }
+ + return proto.Marshal(pb) +} + +// DeserializeJmtNode converts bytes to a jmt.Node +func DeserializeJmtNode(b []byte) (jmt.Node, error) { + pb := &Node{} + if err := proto.Unmarshal(b, pb); err != nil { + return nil, fmt.Errorf("could not unmarshal protobuf: %w", err) + } + + switch n := pb.Node.(type) { + case *Node_InternalNode: + return deserializeInternalNode(n.InternalNode) + case *Node_LeafNode: + return deserializeLeafNode(n.LeafNode) + default: + return nil, fmt.Errorf("unknown protobuf node type: %T", pb.Node) + } +} + +func serializeInternalNode(node *jmt.InternalNode) (*InternalNode, error) { + var bitmap uint32 + children := make([]*InternalNodeChild, 0, 16) + + for i, child := range node.Children { + if child != nil { + bitmap |= (1 << i) // set bit for this nibble position + children = append(children, &InternalNodeChild{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + uint64(child.Version), + child.Digest[:], + child.IsLeaf, + }) + } + // nil children are omitted from the array + } + + return &InternalNode{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + bitmap, + children, + }, nil +} + +func deserializeInternalNode(pb *InternalNode) (*jmt.InternalNode, error) { + var children [16]*jmt.Child + childIndex := 0 + + // Use bitmap to determine which nibble positions have children + for nibble := range children { + if (pb.ChildrenBitmap & (1 << nibble)) != 0 { + // This nibble position has a child + if childIndex >= len(pb.Children) { + return nil, fmt.Errorf("bitmap indicates child at nibble %d but children array is too short", nibble) + } + + pbChild := pb.Children[childIndex] + var digest jmt.Digest + if len(pbChild.Digest) != len(digest) { + return nil, fmt.Errorf("child digest must be %d bytes, got %d", len(digest), len(pbChild.Digest)) + } + copy(digest[:], pbChild.Digest) + children[nibble] = &jmt.Child{ + jmt.Version(pbChild.Version), + digest, + 
pbChild.IsLeaf, + } + childIndex++ + } + // nibble positions with unset bitmap bits remain nil + } + + if childIndex != len(pb.Children) { + return nil, fmt.Errorf("bitmap indicates %d children but array has %d", childIndex, len(pb.Children)) + } + + return &jmt.InternalNode{children}, nil +} + +func serializeLeafNode(node *jmt.LeafNode) (*LeafNode, error) { + return &LeafNode{ + // zero-initialize protobuf built-ins + protoimpl.MessageState{}, + 0, + nil, + // fields + node.KeyDigest[:], + node.Key, + node.ValueDigest[:], + node.Value, + }, nil +} + +func deserializeLeafNode(pb *LeafNode) (*jmt.LeafNode, error) { + var keyDigest, valueDigest jmt.Digest + expectedDigestLen := len(jmt.Digest{}) + if len(pb.KeyDigest) != expectedDigestLen { + return nil, fmt.Errorf("key digest must be %d bytes, got %d", expectedDigestLen, len(pb.KeyDigest)) + } + if len(pb.ValueDigest) != expectedDigestLen { + return nil, fmt.Errorf("value digest must be %d bytes, got %d", expectedDigestLen, len(pb.ValueDigest)) + } + copy(keyDigest[:], pb.KeyDigest) + copy(valueDigest[:], pb.ValueDigest) + + return &jmt.LeafNode{ + keyDigest, + pb.Key, + valueDigest, + pb.Value, + }, nil +} + +// Important: Serialization must preserve ordering of NodeKeys. +func AppendSerializeNodeKey(buffer []byte, nodeKey jmt.NodeKey) []byte { + buffer = binary.BigEndian.AppendUint64(buffer, nodeKey.Version) + buffer = append(buffer, byte(nodeKey.NibblePath.NumNibbles())) + buffer = append(buffer, nodeKey.NibblePath.Bytes()...) 
+ return buffer +} + +func DeserializeNodeKey(enc []byte) (jmt.NodeKey, error) { + if len(enc) < 8 { + return jmt.NodeKey{}, fmt.Errorf("encoding has no version") + } + version := binary.BigEndian.Uint64(enc[:8]) + enc = enc[8:] + if len(enc) == 0 { + return jmt.NodeKey{}, fmt.Errorf("encoding has no num nibbles") + } + numNibbles := int(enc[0]) + enc = enc[1:] + if len(enc) != (numNibbles+1)/2 { + return jmt.NodeKey{}, fmt.Errorf("encoding has less bytes than expected for nibbles") + } + nibblePath, ok := jmt.NewNibblePath(numNibbles, enc[:]) + if !ok { + return jmt.NodeKey{}, fmt.Errorf("encoding has invalid nibble path") + } + return jmt.NodeKey{version, nibblePath}, nil +} + +// Important: Serialization must preserve ordering of StaleNodes. +func AppendSerializeStaleNode(buffer []byte, staleNode jmt.StaleNode) []byte { + buffer = binary.BigEndian.AppendUint64(buffer, staleNode.StaleSinceVersion) + buffer = AppendSerializeNodeKey(buffer, staleNode.NodeKey) + return buffer +} + +func DeserializeStaleNode(enc []byte) (jmt.StaleNode, error) { + if len(enc) < 8 { + return jmt.StaleNode{}, fmt.Errorf("encoding too short") + } + version := binary.BigEndian.Uint64(enc[:8]) + enc = enc[8:] + nodeKey, err := DeserializeNodeKey(enc) + if err != nil { + return jmt.StaleNode{}, fmt.Errorf("error decoding node key: %w", err) + } + return jmt.StaleNode{version, nodeKey}, nil +} diff --git a/offchainreporting2plus/internal/shim/metrics.go b/offchainreporting2plus/internal/shim/metrics.go index 7ec40e8d..0c7a4b1d 100644 --- a/offchainreporting2plus/internal/shim/metrics.go +++ b/offchainreporting2plus/internal/shim/metrics.go @@ -39,3 +39,54 @@ func (m *serializingEndpointMetrics) Close() { m.registerer.Unregister(m.sentMessagesTotal) m.registerer.Unregister(m.droppedMessagesTotal) } + +type keyValueMetrics struct { + registerer prometheus.Registerer + closeWriteSetDurationNanoseconds prometheus.Histogram + txWriteDurationNanoseconds prometheus.Histogram + 
txCommitDurationNanoseconds prometheus.Histogram +} + +const ( + hist_bucket_start = 2 + hist_bucket_factor = 2 + hist_bucket_count = 35 +) + +func newKeyValueMetrics( + registerer prometheus.Registerer, + logger commontypes.Logger, +) *keyValueMetrics { + closeWriteSetDurationNanoseconds := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "ocr3_1_experimental_key_value_close_write_set_duration_ns", + Help: "How long it takes to close the write set.", + Buckets: prometheus.ExponentialBuckets(hist_bucket_start, hist_bucket_factor, hist_bucket_count), + }) + metricshelper.RegisterOrLogError(logger, registerer, closeWriteSetDurationNanoseconds, "ocr3_1_experimental_key_value_close_write_set_duration_ns") + + txWriteDurationNanoseconds := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "ocr3_1_experimental_key_value_tx_write_duration_ns", + Help: "How long it takes to write to the transaction.", + Buckets: prometheus.ExponentialBuckets(hist_bucket_start, hist_bucket_factor, hist_bucket_count), + }) + metricshelper.RegisterOrLogError(logger, registerer, txWriteDurationNanoseconds, "ocr3_1_experimental_key_value_tx_write_duration_ns") + + txCommitDurationNanoseconds := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "ocr3_1_experimental_key_value_tx_commit_duration_ns", + Help: "How long it takes to commit a transaction.", + Buckets: prometheus.ExponentialBuckets(hist_bucket_start, hist_bucket_factor, hist_bucket_count), + }) + metricshelper.RegisterOrLogError(logger, registerer, txCommitDurationNanoseconds, "ocr3_1_experimental_key_value_tx_commit_duration_ns") + return &keyValueMetrics{ + registerer, + closeWriteSetDurationNanoseconds, + txWriteDurationNanoseconds, + txCommitDurationNanoseconds, + } +} + +func (m *keyValueMetrics) Close() { + m.registerer.Unregister(m.closeWriteSetDurationNanoseconds) + m.registerer.Unregister(m.txWriteDurationNanoseconds) + m.registerer.Unregister(m.txCommitDurationNanoseconds) +} diff --git 
a/offchainreporting2plus/internal/shim/ocr3_1_database.go b/offchainreporting2plus/internal/shim/ocr3_1_database.go index 5035de05..943363cf 100644 --- a/offchainreporting2plus/internal/shim/ocr3_1_database.go +++ b/offchainreporting2plus/internal/shim/ocr3_1_database.go @@ -8,7 +8,6 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/serialization" "github.com/smartcontractkit/libocr/offchainreporting2plus/types" - "google.golang.org/protobuf/proto" ) type SerializingOCR3_1Database struct { @@ -17,8 +16,6 @@ type SerializingOCR3_1Database struct { var _ protocol.Database = (*SerializingOCR3_1Database)(nil) -const statePersistenceKey = "state" - func (db *SerializingOCR3_1Database) ReadConfig(ctx context.Context) (*types.ContractConfig, error) { return db.BinaryDb.ReadConfig(ctx) } @@ -76,54 +73,3 @@ func (db *SerializingOCR3_1Database) WriteCert(ctx context.Context, configDigest return db.BinaryDb.WriteProtocolState(ctx, configDigest, certKey, raw) } - -func (db *SerializingOCR3_1Database) ReadStatePersistenceState(ctx context.Context, configDigest types.ConfigDigest) (protocol.StatePersistenceState, error) { - raw, err := db.BinaryDb.ReadProtocolState(ctx, configDigest, statePersistenceKey) - if err != nil { - return protocol.StatePersistenceState{}, err - } - - if len(raw) == 0 { - return protocol.StatePersistenceState{}, nil - } - - return serialization.DeserializeStatePersistenceState(raw) -} - -// Writing with an empty value is the same as deleting. 
-func (db *SerializingOCR3_1Database) WriteStatePersistenceState(ctx context.Context, configDigest types.ConfigDigest, state protocol.StatePersistenceState) error { - raw, err := serialization.SerializeStatePersistenceState(state) - if err != nil { - return err - } - - return db.BinaryDb.WriteProtocolState(ctx, configDigest, statePersistenceKey, raw) -} - -func (db *SerializingOCR3_1Database) ReadAttestedStateTransitionBlock(ctx context.Context, configDigest types.ConfigDigest, seqNr uint64) (protocol.AttestedStateTransitionBlock, error) { - raw, err := db.BinaryDb.ReadBlock(ctx, configDigest, seqNr) - if err != nil { - return protocol.AttestedStateTransitionBlock{}, err - } - - if len(raw) == 0 { - return protocol.AttestedStateTransitionBlock{}, nil - } - - astb := serialization.AttestedStateTransitionBlock{} - if err := proto.Unmarshal(raw, &astb); err != nil { - return protocol.AttestedStateTransitionBlock{}, err - } - - return serialization.DeserializeAttestedStateTransitionBlock(raw) -} - -// Writing with an empty value is the same as deleting. 
-func (db *SerializingOCR3_1Database) WriteAttestedStateTransitionBlock(ctx context.Context, configDigest types.ConfigDigest, seqNr uint64, astb protocol.AttestedStateTransitionBlock) error { - raw, err := serialization.SerializeAttestedStateTransitionBlock(astb) - if err != nil { - return err - } - - return db.BinaryDb.WriteBlock(ctx, configDigest, seqNr, raw) -} diff --git a/offchainreporting2plus/internal/shim/ocr3_1_key_value_store.go b/offchainreporting2plus/internal/shim/ocr3_1_key_value_store.go index b1298553..687666fb 100644 --- a/offchainreporting2plus/internal/shim/ocr3_1_key_value_store.go +++ b/offchainreporting2plus/internal/shim/ocr3_1_key_value_store.go @@ -4,26 +4,55 @@ import ( "bytes" "encoding/binary" "fmt" + "math" + "slices" "sort" "sync" + "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/jmt" + "github.com/smartcontractkit/libocr/internal/singlewriter" "github.com/smartcontractkit/libocr/internal/util" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/blobtypes" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/serialization" "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" ) -type SemanticOCR3_1KeyValueStore struct { +type SemanticOCR3_1KeyValueDatabase struct { + conflictTracker *singlewriter.ConflictTracker KeyValueDatabase ocr3_1types.KeyValueDatabase Limits ocr3_1types.ReportingPluginLimits + logger commontypes.Logger + metrics *keyValueMetrics } -var _ protocol.KeyValueStore = &SemanticOCR3_1KeyValueStore{} +var _ protocol.KeyValueDatabase = &SemanticOCR3_1KeyValueDatabase{} + +func NewSemanticOCR3_1KeyValueDatabase( + keyValueDatabase ocr3_1types.KeyValueDatabase, + limits ocr3_1types.ReportingPluginLimits, + logger commontypes.Logger, + metricsRegisterer 
prometheus.Registerer, +) *SemanticOCR3_1KeyValueDatabase { + return &SemanticOCR3_1KeyValueDatabase{ + singlewriter.NewConflictTracker(), + keyValueDatabase, + limits, + logger, + newKeyValueMetrics(metricsRegisterer, logger), + } +} -func (s *SemanticOCR3_1KeyValueStore) Close() error { - return s.KeyValueDatabase.Close() +func (s *SemanticOCR3_1KeyValueDatabase) Close() error { + err := s.KeyValueDatabase.Close() + s.metrics.Close() + return err } -func (s *SemanticOCR3_1KeyValueStore) HighestCommittedSeqNr() (uint64, error) { +func (s *SemanticOCR3_1KeyValueDatabase) HighestCommittedSeqNr() (uint64, error) { tx, err := s.NewReadTransactionUnchecked() if err != nil { return 0, fmt.Errorf("failed to create read transaction: %w", err) @@ -32,11 +61,20 @@ func (s *SemanticOCR3_1KeyValueStore) HighestCommittedSeqNr() (uint64, error) { return tx.ReadHighestCommittedSeqNr() } -func (s *SemanticOCR3_1KeyValueStore) NewReadWriteTransaction(postSeqNr uint64) (protocol.KeyValueStoreReadWriteTransaction, error) { - tx, err := s.NewReadWriteTransactionUnchecked() +func (s *SemanticOCR3_1KeyValueDatabase) NewSerializedReadWriteTransaction(postSeqNr uint64) (protocol.KeyValueDatabaseReadWriteTransaction, error) { + fakeTx, err := singlewriter.NewSerializedTransaction(s.KeyValueDatabase, s.conflictTracker) if err != nil { return nil, fmt.Errorf("failed to create read write transaction: %w", err) } + tx := &SemanticOCR3_1KeyValueDatabaseReadWriteTransaction{ + &SemanticOCR3_1KeyValueDatabaseReadTransaction{fakeTx, s.Limits}, + fakeTx, + s.metrics, + sync.Mutex{}, + newLimitCheckWriteSet(s.Limits.MaxKeyValueModifiedKeysPlusValuesLength), + &postSeqNr, + false, + } highestCommittedSeqNr, err := tx.ReadHighestCommittedSeqNr() if err != nil { tx.Discard() @@ -46,7 +84,11 @@ func (s *SemanticOCR3_1KeyValueStore) NewReadWriteTransaction(postSeqNr uint64) tx.Discard() return nil, fmt.Errorf("post seq nr %d must be equal to highest committed seq nr + 1 (%d)", postSeqNr, 
highestCommittedSeqNr+1) } - return &SemanticOCR3_1KeyValueStoreReadWriteTransactionWithPreCommitHook{ + if err := checkNotClobbered(tx); err != nil { + tx.Discard() + return nil, err + } + return &SemanticOCR3_1KeyValueDatabaseReadWriteTransactionWithPreCommitHook{ tx, func() error { if err := tx.WriteHighestCommittedSeqNr(postSeqNr); err != nil { @@ -57,20 +99,40 @@ func (s *SemanticOCR3_1KeyValueStore) NewReadWriteTransaction(postSeqNr uint64) }, nil } -func (s *SemanticOCR3_1KeyValueStore) NewReadWriteTransactionUnchecked() (protocol.KeyValueStoreReadWriteTransaction, error) { - tx, err := s.KeyValueDatabase.NewReadWriteTransaction() +func (s *SemanticOCR3_1KeyValueDatabase) NewSerializedReadWriteTransactionUnchecked() (protocol.KeyValueDatabaseReadWriteTransaction, error) { + fakeTx, err := singlewriter.NewSerializedTransaction(s.KeyValueDatabase, s.conflictTracker) if err != nil { return nil, fmt.Errorf("failed to create read write transaction: %w", err) } - return &SemanticOCR3_1KeyValueStoreReadWriteTransaction{ - &SemanticOCR3_1KeyValueStoreReadTransaction{tx, s.Limits}, - tx, + tx := &SemanticOCR3_1KeyValueDatabaseReadWriteTransaction{ + &SemanticOCR3_1KeyValueDatabaseReadTransaction{fakeTx, s.Limits}, + fakeTx, + s.metrics, + sync.Mutex{}, + newLimitCheckWriteSet(s.Limits.MaxKeyValueModifiedKeysPlusValuesLength), + nil, + false, + } + return tx, nil +} + +func (s *SemanticOCR3_1KeyValueDatabase) NewUnserializedReadWriteTransactionUnchecked() (protocol.KeyValueDatabaseReadWriteTransaction, error) { + fakeTx, err := singlewriter.NewUnserializedTransaction(s.KeyValueDatabase) + if err != nil { + return nil, fmt.Errorf("failed to create read write transaction: %w", err) + } + return &SemanticOCR3_1KeyValueDatabaseReadWriteTransaction{ + &SemanticOCR3_1KeyValueDatabaseReadTransaction{fakeTx, s.Limits}, + fakeTx, + s.metrics, sync.Mutex{}, newLimitCheckWriteSet(s.Limits.MaxKeyValueModifiedKeysPlusValuesLength), + nil, + false, }, nil } -func (s 
*SemanticOCR3_1KeyValueStore) NewReadTransaction(postSeqNr uint64) (protocol.KeyValueStoreReadTransaction, error) { +func (s *SemanticOCR3_1KeyValueDatabase) NewReadTransaction(postSeqNr uint64) (protocol.KeyValueDatabaseReadTransaction, error) { tx, err := s.NewReadTransactionUnchecked() if err != nil { return nil, fmt.Errorf("failed to create read transaction: %w", err) @@ -84,43 +146,64 @@ func (s *SemanticOCR3_1KeyValueStore) NewReadTransaction(postSeqNr uint64) (prot tx.Discard() return nil, fmt.Errorf("post seq nr %d must be equal to highest committed seq nr + 1 (%d)", postSeqNr, highestCommittedSeqNr+1) } + if err := checkNotClobbered(tx); err != nil { + tx.Discard() + return nil, err + } return tx, nil } -func (s *SemanticOCR3_1KeyValueStore) NewReadTransactionUnchecked() (protocol.KeyValueStoreReadTransaction, error) { +func checkNotClobbered(tx protocol.KeyValueDatabaseReadTransaction) error { + treeSyncStatus, err := tx.ReadTreeSyncStatus() + if err != nil { + return fmt.Errorf("failed to read tree sync status: %w", err) + } + if treeSyncStatus.Phase != protocol.TreeSyncPhaseInactive { + return fmt.Errorf("tree sync might be in progress") + } + return nil +} + +func (s *SemanticOCR3_1KeyValueDatabase) NewReadTransactionUnchecked() (protocol.KeyValueDatabaseReadTransaction, error) { tx, err := s.KeyValueDatabase.NewReadTransaction() if err != nil { return nil, fmt.Errorf("failed to create read transaction: %w", err) } - return &SemanticOCR3_1KeyValueStoreReadTransaction{tx, s.Limits}, nil + return &SemanticOCR3_1KeyValueDatabaseReadTransaction{tx, s.Limits}, nil } -type SemanticOCR3_1KeyValueStoreReadWriteTransaction struct { - protocol.KeyValueStoreReadTransaction // inherit all read implementations - - rawTransaction ocr3_1types.KeyValueReadWriteTransaction - - mu sync.Mutex - nilOrWriteSet *limitCheckWriteSet +type SemanticOCR3_1KeyValueDatabaseReadWriteTransaction struct { + protocol.KeyValueDatabaseReadTransaction // inherit all read implementations 
+ rawTransaction ocr3_1types.KeyValueDatabaseReadWriteTransaction + metrics *keyValueMetrics + mu sync.Mutex + nilOrWriteSet *limitCheckWriteSet + nilOrSeqNr *uint64 + closedForWriting bool } -var _ protocol.KeyValueStoreReadWriteTransaction = &SemanticOCR3_1KeyValueStoreReadWriteTransaction{} +var _ protocol.KeyValueDatabaseReadWriteTransaction = &SemanticOCR3_1KeyValueDatabaseReadWriteTransaction{} -type SemanticOCR3_1KeyValueStoreReadWriteTransactionWithPreCommitHook struct { - protocol.KeyValueStoreReadWriteTransaction +type SemanticOCR3_1KeyValueDatabaseReadWriteTransactionWithPreCommitHook struct { + protocol.KeyValueDatabaseReadWriteTransaction preCommitHook func() error // must be idempotent } -var _ protocol.KeyValueStoreReadWriteTransaction = &SemanticOCR3_1KeyValueStoreReadWriteTransactionWithPreCommitHook{} +var _ protocol.KeyValueDatabaseReadWriteTransaction = &SemanticOCR3_1KeyValueDatabaseReadWriteTransactionWithPreCommitHook{} -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransactionWithPreCommitHook) Commit() error { +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransactionWithPreCommitHook) Commit() error { if err := s.preCommitHook(); err != nil { return fmt.Errorf("failed while executing preCommit: %w", err) } - return s.KeyValueStoreReadWriteTransaction.Commit() + return s.KeyValueDatabaseReadWriteTransaction.Commit() } -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Commit() error { +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) Commit() error { + start := time.Now() + defer func() { + s.metrics.txCommitDurationNanoseconds.Observe(float64(time.Since(start).Nanoseconds())) + }() + err := s.rawTransaction.Commit() // Transactions might persistently fail to commit, due to another txn having // gone in before that causes a conflict, so we need to discard in any case @@ -129,12 +212,16 @@ func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Commit() error { return err } -func (s 
*SemanticOCR3_1KeyValueStoreReadWriteTransaction) Delete(key []byte) error { +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) Delete(key []byte) error { if !(len(key) <= ocr3_1types.MaxMaxKeyValueKeyLength) { return fmt.Errorf("key length %d exceeds maximum %d", len(key), ocr3_1types.MaxMaxKeyValueKeyLength) } s.mu.Lock() + if s.closedForWriting { + s.mu.Unlock() + return fmt.Errorf("transaction has been closed for writing") + } if s.nilOrWriteSet == nil { s.mu.Unlock() return fmt.Errorf("transaction has been discarded") @@ -145,11 +232,91 @@ func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Delete(key []byte) err return fmt.Errorf("failed to delete key %s from write set: %w", key, err) } s.mu.Unlock() + return s.rawTransaction.Delete(pluginPrefixedUnhashedKey(key)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) deletePrefixedKeys(prefix []byte, except [][]byte, n int) (done bool, err error) { + // We cannot delete the keys while iterating them, if we want to be agnostic + // to kvdb implementation semantics. + var keysToDelete [][]byte - return s.rawTransaction.Delete(pluginPrefixedKey(key)) + it := s.rawTransaction.Range(prefix, nil) + for it.Next() && len(keysToDelete) < n+1 { + if !bytes.HasPrefix(it.Key(), prefix) { + break + } + matchAnyException := false + for _, e := range except { + if bytes.Equal(it.Key(), e) { + matchAnyException = true + break + } + } + if matchAnyException { + continue + } + keysToDelete = append(keysToDelete, it.Key()) + } + if err := it.Err(); err != nil { + it.Close() + return false, fmt.Errorf("failed to range: %w", err) + } + it.Close() + + for _, key := range keysToDelete { + if err := s.rawTransaction.Delete(key); err != nil { + return false, fmt.Errorf("failed to delete key %s: %w", key, err) + } + } + + return len(keysToDelete) <= n, nil +} + +// Caller must ensure to make committed state inaccessible to other transactions +// until completed. Must be reinvoked until done=true. 
+func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DestructiveDestroyForTreeSync(n int) (done bool, err error) { + return s.deletePrefixedKeys([]byte{}, [][]byte{ + []byte(highestCommittedSeqNrKey), + []byte(treeSyncStatusKey), + }, n) +} + +// Helper for reaping methods that require large ranges over multiple transactions +func partialExclusiveRangeKeys(readTransaction ocr3_1types.KeyValueDatabaseReadTransaction, loKey []byte, hiKeyExcl []byte, maxItems int) (keys [][]byte, more bool, err error) { + it := readTransaction.Range(loKey, hiKeyExcl) + defer it.Close() + + for it.Next() { + if len(keys) == maxItems { + more = true + break + } + keys = append(keys, it.Key()) + } + if err := it.Err(); err != nil { + return nil, false, fmt.Errorf("failed to range: %w", err) + } + return keys, more, nil +} + +func partialInclusiveRangeKeys(readTransaction ocr3_1types.KeyValueDatabaseReadTransaction, loKey []byte, hiKeyIncl []byte, maxItems int) (keys [][]byte, more bool, err error) { + hiKeyExcl := append(bytes.Clone(hiKeyIncl), 0) + return partialExclusiveRangeKeys(readTransaction, loKey, hiKeyExcl, maxItems) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) partialExclusiveRangeKeys(loKey []byte, hiKeyExcl []byte, maxItems int) (keys [][]byte, more bool, err error) { + return partialExclusiveRangeKeys(s.rawTransaction, loKey, hiKeyExcl, maxItems) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) partialInclusiveRangeKeys(loKey []byte, hiKeyIncl []byte, maxItems int) (keys [][]byte, more bool, err error) { + return partialInclusiveRangeKeys(s.rawTransaction, loKey, hiKeyIncl, maxItems) } -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Discard() { +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) partialInclusiveRangeKeys(loKey []byte, hiKeyIncl []byte, maxItems int) (keys [][]byte, more bool, err error) { + return partialInclusiveRangeKeys(s.rawTransaction, loKey, hiKeyIncl, maxItems) +} + +func (s 
*SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) Discard() { s.mu.Lock() s.nilOrWriteSet = nil // tombstone s.mu.Unlock() @@ -157,11 +324,9 @@ func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Discard() { s.rawTransaction.Discard() } -// GetWriteSet returns a map from keys in string encoding to values that have been written in -// this transaction. If the value of a key has been deleted, it is mapped to nil. The write set -// must fit in memory. - -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) GetWriteSet() ([]protocol.KeyValuePair, error) { +// GetWriteSet returns sorted list of key-value pairs that have been modified as +// part of this transaction. +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) GetWriteSet() ([]protocol.KeyValuePairWithDeletions, error) { s.mu.Lock() if s.nilOrWriteSet == nil { s.mu.Unlock() @@ -176,7 +341,90 @@ func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) GetWriteSet() ([]proto return writeSet, nil } -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Write(key []byte, value []byte) error { +// CloseWriteSet updates the state tree according to the write set and returns +// the root. After this function is invoked the transaction for writing: any +// future attempts for Writes or Deletes on this transaction will fail. 
+func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) CloseWriteSet() (protocol.StateRootDigest, error) { + start := time.Now() + defer func() { + s.metrics.closeWriteSetDurationNanoseconds.Observe(float64(time.Since(start).Nanoseconds())) + }() + + s.mu.Lock() + if s.nilOrWriteSet == nil { + s.mu.Unlock() + return protocol.StateRootDigest{}, fmt.Errorf("transaction has been discarded") + } + writeSet := s.nilOrWriteSet.Pairs() + s.nilOrWriteSet = nil + s.closedForWriting = true + s.mu.Unlock() + + if s.nilOrSeqNr == nil { + return protocol.StateRootDigest{}, fmt.Errorf("transaction seqNr should not be nil") + } + + keyValueUpdates := make([]jmt.KeyValue, 0, len(writeSet)) + for _, pair := range writeSet { + var value []byte + if !pair.Deleted { + value = pair.Value + } + keyValueUpdates = append(keyValueUpdates, jmt.KeyValue{ + pair.Key, + value, + }) + } + + _, err := jmt.BatchUpdate( + s, + s, + s, + protocol.PrevRootVersion(*s.nilOrSeqNr), + protocol.RootVersion(*s.nilOrSeqNr), + keyValueUpdates, + ) + if err != nil { + return protocol.StateRootDigest{}, fmt.Errorf("failed to batch update: %w", err) + } + + stateRootDigest, err := jmt.ReadRootDigest(s, s, protocol.RootVersion(*s.nilOrSeqNr)) + if err != nil { + return protocol.StateRootDigest{}, fmt.Errorf("failed to read root digest: %w", err) + } + return stateRootDigest, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) ApplyWriteSet(writeSet []protocol.KeyValuePairWithDeletions) (protocol.StateRootDigest, error) { + if s.nilOrSeqNr == nil { + return protocol.StateRootDigest{}, fmt.Errorf("transaction seqNr should not be nil") + } + seqNr := *s.nilOrSeqNr + for i, m := range writeSet { + var err error + switch m.Deleted { + case false: + err = s.Write(m.Key, m.Value) + case true: + err = s.Delete(m.Key) + } + if err != nil { + return protocol.StateRootDigest{}, fmt.Errorf("failed to write %d-th write-set modification for seq nr %d: %w", i, seqNr, err) + } + } + stateRootDigest, 
err := s.CloseWriteSet() + if err != nil { + return protocol.StateRootDigest{}, fmt.Errorf("failed to close write set: %w", err) + } + return stateRootDigest, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) Write(key []byte, value []byte) error { + start := time.Now() + defer func() { + s.metrics.txWriteDurationNanoseconds.Observe(float64(time.Since(start).Nanoseconds())) + }() + if !(len(key) <= ocr3_1types.MaxMaxKeyValueKeyLength) { return fmt.Errorf("key length %d exceeds maximum %d", len(key), ocr3_1types.MaxMaxKeyValueKeyLength) } @@ -187,6 +435,10 @@ func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Write(key []byte, valu value = util.NilCoalesceSlice(value) s.mu.Lock() + if s.closedForWriting { + s.mu.Unlock() + return fmt.Errorf("transaction has been closed for writing") + } if s.nilOrWriteSet == nil { s.mu.Unlock() return fmt.Errorf("transaction has been discarded") @@ -197,67 +449,340 @@ func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) Write(key []byte, valu } s.mu.Unlock() - return s.rawTransaction.Write(pluginPrefixedKey(key), value) + err := s.rawTransaction.Write(pluginPrefixedUnhashedKey(key), value) + if err != nil { + return fmt.Errorf("failed to write key %s to write set: %w", key, err) + } + return nil } -type SemanticOCR3_1KeyValueStoreReadTransaction struct { - rawTransaction ocr3_1types.KeyValueReadTransaction +type SemanticOCR3_1KeyValueDatabaseReadTransaction struct { + rawTransaction ocr3_1types.KeyValueDatabaseReadTransaction limits ocr3_1types.ReportingPluginLimits } -var _ protocol.KeyValueStoreReadTransaction = &SemanticOCR3_1KeyValueStoreReadTransaction{} +var _ protocol.KeyValueDatabaseReadTransaction = &SemanticOCR3_1KeyValueDatabaseReadTransaction{} -func (s *SemanticOCR3_1KeyValueStoreReadTransaction) Discard() { +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) Discard() { s.rawTransaction.Discard() } -func (s *SemanticOCR3_1KeyValueStoreReadTransaction) Read(key []byte) ([]byte, 
error) { +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) Read(key []byte) ([]byte, error) { if !(len(key) <= ocr3_1types.MaxMaxKeyValueKeyLength) { return nil, fmt.Errorf("key length %d exceeds maximum %d", len(key), ocr3_1types.MaxMaxKeyValueKeyLength) } - return s.rawTransaction.Read(pluginPrefixedKey(key)) + return s.rawTransaction.Read(pluginPrefixedUnhashedKey(key)) +} + +func readUint64ValueOrZero(raw []byte) (uint64, error) { + if raw == nil { + return 0, nil + } + if len(raw) != 8 { + return 0, fmt.Errorf("expected 8 bytes for seqNr, got %d", len(raw)) + } + return binary.BigEndian.Uint64(raw), nil } -func (s *SemanticOCR3_1KeyValueStoreReadTransaction) ReadHighestCommittedSeqNr() (uint64, error) { - seqNrRaw, err := s.rawTransaction.Read(highestCommittedSeqNrKey()) +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadHighestCommittedSeqNr() (uint64, error) { + seqNrRaw, err := s.rawTransaction.Read([]byte(highestCommittedSeqNrKey)) if err != nil { return 0, err } - if seqNrRaw == nil { // indicates that we are starting from scratch - return 0, nil + return readUint64ValueOrZero(seqNrRaw) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadLowestPersistedSeqNr() (uint64, error) { + seqNrRaw, err := s.rawTransaction.Read([]byte(lowestPersistedSeqNrKey)) + if err != nil { + return 0, err + } + return readUint64ValueOrZero(seqNrRaw) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadAttestedStateTransitionBlock(seqNr uint64) (protocol.AttestedStateTransitionBlock, error) { + blockRaw, err := s.rawTransaction.Read(blockKey(seqNr)) + if err != nil { + return protocol.AttestedStateTransitionBlock{}, err } - if len(seqNrRaw) != 8 { - return 0, fmt.Errorf("expected 8 bytes for seqNr, got %d", len(seqNrRaw)) + if blockRaw == nil { + return protocol.AttestedStateTransitionBlock{}, nil } - return binary.BigEndian.Uint64(seqNrRaw), nil + block, err := serialization.DeserializeAttestedStateTransitionBlock(blockRaw) + if err 
!= nil { + return protocol.AttestedStateTransitionBlock{}, fmt.Errorf("failed to deserialize attested state transition block %d: %w", seqNr, err) + } + return block, nil } -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) WriteHighestCommittedSeqNr(seqNr uint64) error { - return s.rawTransaction.Write(highestCommittedSeqNrKey(), binary.BigEndian.AppendUint64(nil, seqNr)) +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadAttestedStateTransitionBlocks(minSeqNr uint64, maxItems int) (blocks []protocol.AttestedStateTransitionBlock, more bool, err error) { + blockKeys, more, err := s.partialInclusiveRangeKeys(blockKey(minSeqNr), blockKey(math.MaxUint64), maxItems) + if err != nil { + return nil, false, fmt.Errorf("failed to range: %w", err) + } + + for _, blockKey := range blockKeys { + seqNr, err := deserializeBlockKey(blockKey) + if err != nil { + return nil, false, fmt.Errorf("failed to deserialize block key: %w", err) + } + block, err := s.ReadAttestedStateTransitionBlock(seqNr) + if err != nil { + return nil, false, fmt.Errorf("failed to read attested state transition block %d: %w", seqNr, err) + } + blocks = append(blocks, block) + } + return blocks, more, nil } -func (s *SemanticOCR3_1KeyValueStoreReadTransaction) ReadBlob(blobDigest protocol.BlobDigest) ([]byte, error) { - var blob []byte +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteAttestedStateTransitionBlock(seqNr uint64, block protocol.AttestedStateTransitionBlock) error { + blockBytes, err := serialization.SerializeAttestedStateTransitionBlock(block) + if err != nil { + return fmt.Errorf("failed to serialize attested state transition block %d: %w", seqNr, err) + } + return s.rawTransaction.Write(blockKey(seqNr), blockBytes) +} - length, err := s.ReadBlobMeta(blobDigest) +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DeleteAttestedStateTransitionBlocks(maxSeqNrToDelete uint64, maxItems int) (done bool, err error) { + keys, more, err := 
s.partialInclusiveRangeKeys(blockKey(0), blockKey(maxSeqNrToDelete), maxItems) if err != nil { - return nil, fmt.Errorf("error reading blob meta for %s: %w", blobDigest, err) + return false, fmt.Errorf("failed to range: %w", err) + } + for _, key := range keys { + if err := s.rawTransaction.Delete(key); err != nil { + return false, fmt.Errorf("failed to delete key %s: %w", key, err) + } + } + return !more, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadTreeSyncStatus() (protocol.TreeSyncStatus, error) { + statusRaw, err := s.rawTransaction.Read([]byte(treeSyncStatusKey)) + if err != nil { + return protocol.TreeSyncStatus{}, err + } + if statusRaw == nil { + return protocol.TreeSyncStatus{}, nil + } + return serialization.DeserializeTreeSyncStatus(statusRaw) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadTreeSyncChunk( + toSeqNr uint64, + startIndex jmt.Digest, + requestEndInclIndex jmt.Digest, +) ( + endInclIndex jmt.Digest, + boundingLeaves []jmt.BoundingLeaf, + keyValues []protocol.KeyValuePair, + err error, +) { + if !(0 < toSeqNr) { + return jmt.Digest{}, nil, nil, fmt.Errorf("toSeqNr (%d) must be > 0", toSeqNr) + } + + highestCommittedSeqNr, err := s.ReadHighestCommittedSeqNr() + if err != nil { + return jmt.Digest{}, nil, nil, fmt.Errorf("failed to read highest committed seq nr") + } + + lowestPersistedSeqNr, err := s.ReadLowestPersistedSeqNr() + if err != nil { + return jmt.Digest{}, nil, nil, fmt.Errorf("failed to read lowest persisted seq nr") + } + + if !(lowestPersistedSeqNr <= toSeqNr && toSeqNr <= highestCommittedSeqNr) { + return jmt.Digest{}, nil, nil, fmt.Errorf("toSeqNr (%d) must be >= lowest persisted seq nr (%d) and <= highest committed seq nr (%d)", toSeqNr, lowestPersistedSeqNr, highestCommittedSeqNr) + } + + keyValues, truncated, err := jmt.ReadRange( + s, + s, + protocol.RootVersion(toSeqNr), + startIndex, + requestEndInclIndex, + protocol.MaxTreeSyncChunkKeysPlusValuesLength, + 
protocol.MaxTreeSyncChunkKeys, + ) + if err != nil { + return jmt.Digest{}, nil, nil, fmt.Errorf("failed to read range: %w", err) + } + + if truncated { + if len(keyValues) == 0 { + return jmt.Digest{}, nil, nil, fmt.Errorf("read range could not even fit a single kv pair in required limits, the limits are probably wrong") + } + endInclIndex = jmt.DigestKey(keyValues[len(keyValues)-1].Key) + } else { + endInclIndex = requestEndInclIndex + } + + boundingLeaves, err = jmt.ProveSubrange( + s, + s, + protocol.RootVersion(toSeqNr), + startIndex, + endInclIndex, + ) + if err != nil { + return jmt.Digest{}, nil, nil, fmt.Errorf("failed to prove range: %w", err) } - if length == 0 { + return +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteHighestCommittedSeqNr(seqNr uint64) error { + preHighestCommittedSeqNr, err := s.ReadHighestCommittedSeqNr() + if err != nil { + return fmt.Errorf("failed to read highest committed seq nr: %w", err) + } + if preHighestCommittedSeqNr > seqNr { + return fmt.Errorf("pre highest committed seq nr %d must be <= highest committed seq nr %d", preHighestCommittedSeqNr, seqNr) + } + return s.rawTransaction.Write([]byte(highestCommittedSeqNrKey), encodeBigEndianUint64(seqNr)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteLowestPersistedSeqNr(seqNr uint64) error { + preLowestPersistedSeqNr, err := s.ReadLowestPersistedSeqNr() + if err != nil { + return fmt.Errorf("failed to read lowest persisted seq nr: %w", err) + } + if seqNr < preLowestPersistedSeqNr { + return fmt.Errorf("pre lowest persisted seq nr %d must be <= lowest persisted seq nr %d", preLowestPersistedSeqNr, seqNr) + } + return s.rawTransaction.Write([]byte(lowestPersistedSeqNrKey), encodeBigEndianUint64(seqNr)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteTreeSyncStatus(status protocol.TreeSyncStatus) error { + rawStatus, err := serialization.SerializeTreeSyncStatus(status) + if err != nil { + return err + } + 
return s.rawTransaction.Write([]byte(treeSyncStatusKey), rawStatus) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) VerifyAndWriteTreeSyncChunk( + targetRootDigest protocol.StateRootDigest, + targetSeqNr uint64, + startIndex jmt.Digest, + endInclIndex jmt.Digest, + boundingLeaves []jmt.BoundingLeaf, + keyValues []protocol.KeyValuePair, +) (protocol.VerifyAndWriteTreeSyncChunkResult, error) { + if len(keyValues) > protocol.MaxTreeSyncChunkKeys { + return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("too many leaves: %d > %d", + len(keyValues), protocol.MaxTreeSyncChunkKeys) + } + var byteBudget int + for _, kv := range keyValues { + byteBudget += len(kv.Key) + len(kv.Value) + } + if byteBudget > protocol.MaxTreeSyncChunkKeysPlusValuesLength { + return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("chunk exceeds byte limit: %d > %d", + byteBudget, protocol.MaxTreeSyncChunkKeysPlusValuesLength) + } + + prevIdx := startIndex + for i, kv := range keyValues { + if kv.Value == nil { + return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("leaf %v has nil value", kv) + } + idx := hashPluginKey(kv.Key) + if bytes.Compare(idx[:], startIndex[:]) < 0 { + return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("index of leaf %d out of chunk range, want index >= startIndex:%x got index:%x", i, startIndex, idx) + } + if bytes.Compare(idx[:], endInclIndex[:]) > 0 { + return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("index of leaf %d out of chunk range, want index <= endInclIndex:%x got index:%x", i, endInclIndex, idx) + } + if i > 0 && bytes.Compare(idx[:], prevIdx[:]) <= 0 { + return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("leaves not strictly ascending") + } + prevIdx = idx + } + + // verify subrange proof + { + err := jmt.VerifySubrange( + targetRootDigest, + startIndex, + endInclIndex, + keyValues, + boundingLeaves, + ) + if err != nil { + 
return protocol.VerifyAndWriteTreeSyncChunkResultByzantine, fmt.Errorf("invalid subrange proof: %w", err) + } + } + + // apply the updates as indicated by the leaves + { + _, err := jmt.BatchUpdate( + s, + s, + s, + protocol.RootVersion(targetSeqNr), + protocol.RootVersion(targetSeqNr), + keyValues, + ) + if err != nil { + return protocol.VerifyAndWriteTreeSyncChunkResultUnrelatedError, fmt.Errorf("failed to batch update: %w", err) + } + } + + // write flat representation + + for _, kv := range keyValues { + err := s.rawTransaction.Write(pluginPrefixedUnhashedKey(kv.Key), kv.Value) + if err != nil { + return protocol.VerifyAndWriteTreeSyncChunkResultUnrelatedError, fmt.Errorf("could not write the key-value pair to store: %w", err) + } + } + + rootDigest, err := jmt.ReadRootDigest( + s, + s, + protocol.RootVersion(targetSeqNr), + ) + if err != nil { + return protocol.VerifyAndWriteTreeSyncChunkResultUnrelatedError, fmt.Errorf("failed to read root digest: %w", err) + } + + if rootDigest == targetRootDigest { + return protocol.VerifyAndWriteTreeSyncChunkResultOkComplete, nil + } + return protocol.VerifyAndWriteTreeSyncChunkResultOkNeedMore, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadBlobPayload(blobDigest protocol.BlobDigest) ([]byte, error) { + blobMeta, err := s.ReadBlobMeta(blobDigest) + if err != nil { + return nil, fmt.Errorf("error reading blob meta for %s: %w", blobDigest, err) + } + if blobMeta == nil { return nil, nil } + if slices.Contains(blobMeta.ChunksHave, false) { + return nil, fmt.Errorf("blob has missing chunks") + } + + highestCommittedSeqNr, err := s.ReadHighestCommittedSeqNr() + if err != nil { + return nil, fmt.Errorf("error reading highest committed seq nr: %w", err) + } + if blobMeta.ExpirySeqNr < highestCommittedSeqNr { + return nil, fmt.Errorf("blob has expired") + } it := s.rawTransaction.Range(blobChunkPrefixedKey(blobDigest), nil) defer it.Close() - residualLength := length + residualLength := 
blobMeta.PayloadLength + payload := make([]byte, 0, residualLength) for i := uint64(0); residualLength > 0 && it.Next(); i++ { key := it.Key() if !bytes.Equal(key, blobChunkKey(blobDigest, i)) { - // gap in keys, we're missing a chunk - return nil, nil + return nil, fmt.Errorf("unexpected key for %v-th chunk: %x", i, key) } value, err := it.Value() @@ -268,12 +793,11 @@ func (s *SemanticOCR3_1KeyValueStoreReadTransaction) ReadBlob(blobDigest protoco expectedChunkSize := min(protocol.BlobChunkSize, residualLength) actualChunkSize := uint64(len(value)) if actualChunkSize != expectedChunkSize { - // we don't have the full blob yet - return nil, nil + return nil, fmt.Errorf("actual chunk size %v != expected chunk size %v", actualChunkSize, expectedChunkSize) } residualLength -= actualChunkSize - blob = append(blob, value...) + payload = append(payload, value...) } err = it.Err() @@ -282,81 +806,291 @@ func (s *SemanticOCR3_1KeyValueStoreReadTransaction) ReadBlob(blobDigest protoco } if residualLength != 0 { - // we somehow don't have the full blob yet, defensive - return nil, nil + return nil, fmt.Errorf("residual length %v != 0 even though we have all chunks", residualLength) } - return blob, nil + return payload, nil } -func (s *SemanticOCR3_1KeyValueStoreReadTransaction) ReadBlobChunk(blobDigest protocol.BlobDigest, chunkIndex uint64) ([]byte, error) { +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadBlobChunk(blobDigest protocol.BlobDigest, chunkIndex uint64) ([]byte, error) { return s.rawTransaction.Read(blobChunkKey(blobDigest, chunkIndex)) } -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) WriteBlobChunk(blobDigest protocol.BlobDigest, chunkIndex uint64, chunk []byte) error { +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadNode(nodeKey jmt.NodeKey) (jmt.Node, error) { + rawNode, err := s.rawTransaction.Read(treePrefixedKey(nodeKey)) + if err != nil { + return nil, fmt.Errorf("failed to read jmt node: %w", err) + } + if rawNode 
== nil { + + return nil, nil + } + return serialization.DeserializeJmtNode(rawNode) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadRoot(version jmt.Version) (jmt.NodeKey, error) { + rawNodeKey, err := s.rawTransaction.Read(rootKey(version)) + if err != nil { + return jmt.NodeKey{}, fmt.Errorf("failed to read jmt root: %w", err) + } + if rawNodeKey == nil { + return jmt.NodeKey{}, nil + } + return serialization.DeserializeNodeKey(rawNodeKey) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteNode(nodeKey jmt.NodeKey, nodeOrNil jmt.Node) error { + if nodeOrNil == nil { + return s.rawTransaction.Delete(treePrefixedKey(nodeKey)) + } + + rawNode, err := serialization.SerializeJmtNode(nodeOrNil) + if err != nil { + return fmt.Errorf("failed to serialize jmt node: %w", err) + } + return s.rawTransaction.Write(treePrefixedKey(nodeKey), rawNode) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteRoot(version jmt.Version, nodeKey jmt.NodeKey) error { + return s.rawTransaction.Write(rootKey(version), serialization.AppendSerializeNodeKey(nil, nodeKey)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DeleteRoots(minVersionToKeep jmt.Version, maxItems int) (done bool, err error) { + keys, more, err := s.partialExclusiveRangeKeys(rootKey(0), rootKey(minVersionToKeep), maxItems) + if err != nil { + return false, fmt.Errorf("failed to range: %w", err) + } + for _, key := range keys { + if err := s.rawTransaction.Delete(key); err != nil { + return false, fmt.Errorf("failed to delete key %s: %w", key, err) + } + } + return !more, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteStaleNode(staleNode jmt.StaleNode) error { + return s.rawTransaction.Write(stalePrefixedKey(staleNode), nil) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DeleteStaleNodes(maxStaleSinceVersion jmt.Version, maxItems int) (done bool, err error) { + staleIndexNodeKeys, more, err := 
s.partialInclusiveRangeKeys(staleKeyWithStaleSinceVersionBase(0), staleKeyWithStaleSinceVersionBase(maxStaleSinceVersion), maxItems) + if err != nil { + return false, fmt.Errorf("failed to range: %w", err) + } + + for _, staleIndexNodeKey := range staleIndexNodeKeys { + staleNode, err := deserializeStalePrefixedKey(staleIndexNodeKey) + if err != nil { + return false, fmt.Errorf("failed to deserialize stale node: %w", err) + } + + err = s.WriteNode(staleNode.NodeKey, nil) + if err != nil { + return false, fmt.Errorf("error writing node %v: %w", staleNode.NodeKey, err) + } + err = s.deleteStaleNode(staleNode) + if err != nil { + return false, fmt.Errorf("error deleting stale node %v: %w", staleNode.NodeKey, err) + } + } + return !more, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) deleteStaleNode(staleNode jmt.StaleNode) error { + return s.rawTransaction.Delete(stalePrefixedKey(staleNode)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteBlobChunk(blobDigest protocol.BlobDigest, chunkIndex uint64, chunk []byte) error { return s.rawTransaction.Write(blobChunkKey(blobDigest, chunkIndex), chunk) } -func (s *SemanticOCR3_1KeyValueStoreReadTransaction) ReadBlobMeta(blobDigest protocol.BlobDigest) (uint64, error) { - lengthBytes, err := s.rawTransaction.Read(blobMetaPrefixKey(blobDigest)) +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DeleteBlobChunk(blobDigest protocol.BlobDigest, chunkIndex uint64) error { + return s.rawTransaction.Delete(blobChunkKey(blobDigest, chunkIndex)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadBlobMeta(blobDigest protocol.BlobDigest) (*protocol.BlobMeta, error) { + metaBytes, err := s.rawTransaction.Read(blobMetaPrefixKey(blobDigest)) if err != nil { - return 0, fmt.Errorf("error reading blob meta for %s: %w", blobDigest, err) + return nil, fmt.Errorf("error reading blob meta for %s: %w", blobDigest, err) } - if lengthBytes == nil { + if metaBytes == nil { // no 
record of the blob at all - return 0, nil + return nil, nil } - if len(lengthBytes) != 8 { - return 0, fmt.Errorf("expected 8 bytes for blob meta length, got %d", len(lengthBytes)) + + blobMeta, err := serialization.DeserializeBlobMeta(metaBytes) + if err != nil { + return nil, fmt.Errorf("error unmarshaling blob meta for %s: %w", blobDigest, err) } - return binary.BigEndian.Uint64(lengthBytes), nil + return &blobMeta, nil } -func (s *SemanticOCR3_1KeyValueStoreReadWriteTransaction) WriteBlobMeta(blobDigest protocol.BlobDigest, length uint64) error { - if length == 0 { - return fmt.Errorf("cannot write blob meta with length 0 for blob %s", blobDigest) +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteBlobMeta(blobDigest protocol.BlobDigest, blobMeta protocol.BlobMeta) error { + metaBytes, err := serialization.SerializeBlobMeta(blobMeta) + if err != nil { + return fmt.Errorf("error marshaling blob meta for %s: %w", blobDigest, err) } - lengthBytes := binary.BigEndian.AppendUint64(nil, length) - return s.rawTransaction.Write(blobMetaPrefixKey(blobDigest), lengthBytes) + return s.rawTransaction.Write(blobMetaPrefixKey(blobDigest), metaBytes) } -const ( - protocolPrefix = byte(0) - pluginPrefix = byte(1) +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DeleteBlobMeta(blobDigest protocol.BlobDigest) error { + return s.rawTransaction.Delete(blobMetaPrefixKey(blobDigest)) +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadTransaction) ReadStaleBlobIndex(maxStaleSinceSeqNr uint64, limit int) ([]protocol.StaleBlob, error) { + it := s.rawTransaction.Range(staleBlobIndexPrefixKey(protocol.StaleBlob{0, blobtypes.BlobDigest{}}), staleBlobIndexPrefixKey(protocol.StaleBlob{maxStaleSinceSeqNr + 1, blobtypes.BlobDigest{}})) + defer it.Close() + + var staleBlobs []protocol.StaleBlob + + for i := 0; i < limit && it.Next(); i++ { + key := it.Key() + staleBlob, err := deserializeStaleBlobIndexKey(key) + if err != nil { + return nil, fmt.Errorf("failed to 
deserialize stale blob index key: %w", err) + } + staleBlobs = append(staleBlobs, staleBlob) + } - blobChunkSuffix = "blob chunk" - blobMetaSuffix = "blob meta" + if err := it.Err(); err != nil { + return nil, fmt.Errorf("error iterating over stale blob index: %w", err) + } + + return staleBlobs, nil +} + +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) WriteStaleBlobIndex(staleBlob protocol.StaleBlob) error { + return s.rawTransaction.Write(staleBlobIndexPrefixKey(staleBlob), []byte{}) +} - highestCommittedSeqNrKeySuffix = "highestCommittedSeqNo" +func (s *SemanticOCR3_1KeyValueDatabaseReadWriteTransaction) DeleteStaleBlobIndex(staleBlob protocol.StaleBlob) error { + return s.rawTransaction.Delete(staleBlobIndexPrefixKey(staleBlob)) +} + +const ( + blockPrefix = "B|" + pluginPrefix = "P|" + blobChunkPrefix = "BC|" + blobMetaPrefix = "BM|" + staleBlobIndexPrefix = "BI|" + treeNodePrefix = "TN|" + treeRootPrefix = "TR|" + treeStaleNodePrefix = "TSN|" + + treeSyncStatusKey = "TSS" + highestCommittedSeqNrKey = "HCS" + lowestPersistedSeqNrKey = "LPS" ) -func highestCommittedSeqNrKey() []byte { - return protocolPrefixedKey([]byte(highestCommittedSeqNrKeySuffix)) +func encodeBigEndianUint64(n uint64) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, n) + return b +} + +func hashPluginKey(key []byte) jmt.Digest { + return jmt.DigestKey(key) } -func pluginPrefixedKey(key []byte) []byte { - return prefixKey(pluginPrefix, key) +func pluginPrefixedUnhashedKey(key []byte) []byte { + pluginKey := hashPluginKey(key) + return pluginPrefixedHashedKey(pluginKey[:]) } -func protocolPrefixedKey(key []byte) []byte { - return prefixKey(protocolPrefix, key) +func pluginPrefixedHashedKey(hashedKey []byte) []byte { + return append([]byte(pluginPrefix), hashedKey[:]...) } +// ────────────────────────── blocks ─────────────────────────── + +func blockKey(seqNr uint64) []byte { + return append([]byte(blockPrefix), encodeBigEndianUint64(seqNr)...) 
+} + +func deserializeBlockKey(enc []byte) (uint64, error) { + if len(enc) < len(blockPrefix) { + return 0, fmt.Errorf("encoding too short") + } + enc = enc[len(blockPrefix):] + return binary.BigEndian.Uint64(enc), nil +} + +// ────────────────────────── blobs ─────────────────────────── + func blobChunkPrefixedKey(blobDigest protocol.BlobDigest) []byte { - return append(protocolPrefixedKey([]byte(blobChunkSuffix)), blobDigest[:]...) + return append([]byte(blobChunkPrefix), blobDigest[:]...) } func blobChunkKey(blobDigest protocol.BlobDigest, chunkIndex uint64) []byte { - chunkIndexBytes := binary.BigEndian.AppendUint64(nil, chunkIndex) - return append(blobChunkPrefixedKey(blobDigest), chunkIndexBytes...) + return append(blobChunkPrefixedKey(blobDigest), encodeBigEndianUint64(chunkIndex)...) } func blobMetaPrefixKey(blobDigest protocol.BlobDigest) []byte { - return append(protocolPrefixedKey([]byte(blobMetaSuffix)), blobDigest[:]...) + return append([]byte(blobMetaPrefix), blobDigest[:]...) +} + +// ───────────────────────── meta ──────────────────────────── + +func rootKey(version uint64) []byte { + return append([]byte(treeRootPrefix), encodeBigEndianUint64(version)...) +} + +// ────────────────────────── tree ─────────────────────────── + +func treePrefixedKey(nodeKey jmt.NodeKey) []byte { + base := []byte(treeNodePrefix) + return serialization.AppendSerializeNodeKey(base, nodeKey) +} + +// ────────────────────────── stale tree nodes ─────────────────────────── + +func staleKeyWithStaleSinceVersionBase(staleSinceVersion jmt.Version) []byte { + return append([]byte(treeStaleNodePrefix), encodeBigEndianUint64(staleSinceVersion)...) } -func prefixKey(prefix byte, key []byte) []byte { - return append([]byte{prefix}, key...) 
+func stalePrefixedKey(staleNode jmt.StaleNode) []byte { + base := staleKeyWithStaleSinceVersionBase(staleNode.StaleSinceVersion) + return serialization.AppendSerializeNodeKey(base, staleNode.NodeKey) +} + +func deserializeStalePrefixedKey(enc []byte) (jmt.StaleNode, error) { + base := []byte(treeStaleNodePrefix) + if len(enc) < len(base) { + return jmt.StaleNode{}, fmt.Errorf("encoding too short") + } + enc = enc[len(base):] + return serialization.DeserializeStaleNode(enc) +} + +// ────────────────────────── stale blobs ─────────────────────────── + +func staleBlobIndexPrefixKey(staleBlob protocol.StaleBlob) []byte { + base := []byte(staleBlobIndexPrefix) + base = binary.BigEndian.AppendUint64(base, staleBlob.StaleSinceSeqNr) + base = append(base, staleBlob.BlobDigest[:]...) + return base +} + +func deserializeStaleBlobIndexKey(enc []byte) (protocol.StaleBlob, error) { + base := []byte(staleBlobIndexPrefix) + if len(enc) < len(base) { + return protocol.StaleBlob{}, fmt.Errorf("encoding too short") + } + enc = enc[len(base):] + if len(enc) < 8 { + return protocol.StaleBlob{}, fmt.Errorf("encoding too short to contain seqnr") + } + staleSinceSeqNr := binary.BigEndian.Uint64(enc[:8]) + enc = enc[8:] + if len(enc) < len(protocol.BlobDigest{}) { + return protocol.StaleBlob{}, fmt.Errorf("encoding too short to contain blob digest") + } + blobDigest := protocol.BlobDigest(enc[:len(protocol.BlobDigest{})]) + enc = enc[len(protocol.BlobDigest{}):] + if len(enc) != 0 { + return protocol.StaleBlob{}, fmt.Errorf("encoding too long") + } + return protocol.StaleBlob{staleSinceSeqNr, blobDigest}, nil } type limitCheckWriteSet struct { @@ -406,10 +1140,10 @@ func (l *limitCheckWriteSet) Delete(key []byte) error { return l.modify(key, nil) } -func (l *limitCheckWriteSet) Pairs() []protocol.KeyValuePair { - pairs := make([]protocol.KeyValuePair, 0, len(l.m)) +func (l *limitCheckWriteSet) Pairs() []protocol.KeyValuePairWithDeletions { + pairs := 
make([]protocol.KeyValuePairWithDeletions, 0, len(l.m)) for k, v := range l.m { - pairs = append(pairs, protocol.KeyValuePair{ + pairs = append(pairs, protocol.KeyValuePairWithDeletions{ []byte(k), v, v == nil, diff --git a/offchainreporting2plus/internal/shim/ocr3_1_reporting_plugin.go b/offchainreporting2plus/internal/shim/ocr3_1_reporting_plugin.go index 5e1ed8eb..b8544cab 100644 --- a/offchainreporting2plus/internal/shim/ocr3_1_reporting_plugin.go +++ b/offchainreporting2plus/internal/shim/ocr3_1_reporting_plugin.go @@ -21,49 +21,49 @@ type LimitCheckOCR3_1ReportingPlugin[RI any] struct { var _ ocr3_1types.ReportingPlugin[struct{}] = LimitCheckOCR3_1ReportingPlugin[struct{}]{} -func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Query(ctx context.Context, seqNr uint64, kvReader ocr3_1types.KeyValueReader, blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher) (types.Query, error) { +func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Query(ctx context.Context, seqNr uint64, kvReader ocr3_1types.KeyValueStateReader, blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher) (types.Query, error) { query, err := rp.Plugin.Query(ctx, seqNr, kvReader, blobBroadcastFetcher) if err != nil { return nil, err } if !(len(query) <= rp.Limits.MaxQueryLength) { - return nil, fmt.Errorf("LimitCheckOCR3Plugin: underlying plugin returned oversize query (%v vs %v)", len(query), rp.Limits.MaxQueryLength) + return nil, fmt.Errorf("LimitCheckOCR3_1Plugin: underlying plugin returned oversize query (%v vs %v)", len(query), rp.Limits.MaxQueryLength) } return query, nil } -func (rp LimitCheckOCR3_1ReportingPlugin[RI]) ObservationQuorum(ctx context.Context, seqNr uint64, aq types.AttributedQuery, aos []types.AttributedObservation, kvReader ocr3_1types.KeyValueReader, blobFetcher ocr3_1types.BlobFetcher) (bool, error) { +func (rp LimitCheckOCR3_1ReportingPlugin[RI]) ObservationQuorum(ctx context.Context, seqNr uint64, aq types.AttributedQuery, aos []types.AttributedObservation, kvReader 
ocr3_1types.KeyValueStateReader, blobFetcher ocr3_1types.BlobFetcher) (bool, error) { return rp.Plugin.ObservationQuorum(ctx, seqNr, aq, aos, kvReader, blobFetcher) } -func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Observation(ctx context.Context, seqNr uint64, aq types.AttributedQuery, kvReader ocr3_1types.KeyValueReader, blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher) (types.Observation, error) { +func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Observation(ctx context.Context, seqNr uint64, aq types.AttributedQuery, kvReader ocr3_1types.KeyValueStateReader, blobBroadcastFetcher ocr3_1types.BlobBroadcastFetcher) (types.Observation, error) { observation, err := rp.Plugin.Observation(ctx, seqNr, aq, kvReader, blobBroadcastFetcher) if err != nil { return nil, err } if !(len(observation) <= rp.Limits.MaxObservationLength) { - return nil, fmt.Errorf("LimitCheckOCR3Plugin: underlying plugin returned oversize observation (%v vs %v)", len(observation), rp.Limits.MaxObservationLength) + return nil, fmt.Errorf("LimitCheckOCR3_1Plugin: underlying plugin returned oversize observation (%v vs %v)", len(observation), rp.Limits.MaxObservationLength) } return observation, nil } -func (rp LimitCheckOCR3_1ReportingPlugin[RI]) ValidateObservation(ctx context.Context, seqNr uint64, aq types.AttributedQuery, ao types.AttributedObservation, kvReader ocr3_1types.KeyValueReader, blobFetcher ocr3_1types.BlobFetcher) error { +func (rp LimitCheckOCR3_1ReportingPlugin[RI]) ValidateObservation(ctx context.Context, seqNr uint64, aq types.AttributedQuery, ao types.AttributedObservation, kvReader ocr3_1types.KeyValueStateReader, blobFetcher ocr3_1types.BlobFetcher) error { return rp.Plugin.ValidateObservation(ctx, seqNr, aq, ao, kvReader, blobFetcher) } -func (rp LimitCheckOCR3_1ReportingPlugin[RI]) StateTransition(ctx context.Context, seqNr uint64, aq types.AttributedQuery, aos []types.AttributedObservation, kvReadWriter ocr3_1types.KeyValueReadWriter, blobFetcher ocr3_1types.BlobFetcher) 
(ocr3_1types.ReportsPlusPrecursor, error) { +func (rp LimitCheckOCR3_1ReportingPlugin[RI]) StateTransition(ctx context.Context, seqNr uint64, aq types.AttributedQuery, aos []types.AttributedObservation, kvReadWriter ocr3_1types.KeyValueStateReadWriter, blobFetcher ocr3_1types.BlobFetcher) (ocr3_1types.ReportsPlusPrecursor, error) { reportsPlusPrecursor, err := rp.Plugin.StateTransition(ctx, seqNr, aq, aos, kvReadWriter, blobFetcher) if err != nil { return nil, err } - //if !(len(reportsPlusPrecursor) <= rp.Limits.MaxReportsPlusPrecursorLength) { - // return nil, fmt.Errorf("LimitCheckOCR3Plugin: underlying plugin returned oversize reportsPlus (%v vs %v)", len(reportsPlusPrecursor), rp.Limits.MaxReportsPlusPrecursorLength) - //} + if !(len(reportsPlusPrecursor) <= rp.Limits.MaxReportsPlusPrecursorLength) { + return nil, fmt.Errorf("LimitCheckOCR3_1Plugin: underlying plugin returned oversize reports precursor (%v vs %v)", len(reportsPlusPrecursor), rp.Limits.MaxReportsPlusPrecursorLength) + } return reportsPlusPrecursor, nil } -func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Committed(ctx context.Context, seqNr uint64, keyValueReader ocr3_1types.KeyValueReader) error { +func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Committed(ctx context.Context, seqNr uint64, keyValueReader ocr3_1types.KeyValueStateReader) error { return rp.Plugin.Committed(ctx, seqNr, keyValueReader) } @@ -73,11 +73,11 @@ func (rp LimitCheckOCR3_1ReportingPlugin[RI]) Reports(ctx context.Context, seqNr return nil, err } if !(len(reports) <= rp.Limits.MaxReportCount) { - return nil, fmt.Errorf("LimitCheckOCR3Plugin: underlying plugin returned too many reports (%v vs %v)", len(reports), rp.Limits.MaxReportCount) + return nil, fmt.Errorf("LimitCheckOCR3_1Plugin: underlying plugin returned too many reports (%v vs %v)", len(reports), rp.Limits.MaxReportCount) } for i, reportPlus := range reports { if !(len(reportPlus.ReportWithInfo.Report) <= rp.Limits.MaxReportLength) { - return nil, 
fmt.Errorf("LimitCheckOCR3Plugin: underlying plugin returned oversize report at index %v (%v vs %v)", i, len(reportPlus.ReportWithInfo.Report), rp.Limits.MaxReportLength) + return nil, fmt.Errorf("LimitCheckOCR3_1Plugin: underlying plugin returned oversize report at index %v (%v vs %v)", i, len(reportPlus.ReportWithInfo.Report), rp.Limits.MaxReportLength) } } return reports, nil diff --git a/offchainreporting2plus/internal/shim/ocr3_1_serializing_endpoint.go b/offchainreporting2plus/internal/shim/ocr3_1_serializing_endpoint.go index 310587e8..85349d25 100644 --- a/offchainreporting2plus/internal/shim/ocr3_1_serializing_endpoint.go +++ b/offchainreporting2plus/internal/shim/ocr3_1_serializing_endpoint.go @@ -3,7 +3,6 @@ package shim import ( "fmt" - "math" "sync" "time" @@ -11,6 +10,7 @@ import ( "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/loghelper" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/config/ocr3config" + "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/managed/limits" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/protocol" "github.com/smartcontractkit/libocr/offchainreporting2plus/internal/ocr3_1/serialization" "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" @@ -19,14 +19,15 @@ import ( ) type OCR3_1SerializingEndpoint[RI any] struct { - chTelemetry chan<- *serialization.TelemetryWrapper - configDigest types.ConfigDigest - endpoint types.BinaryNetworkEndpoint2 - maxSigLen int - logger commontypes.Logger - metrics *serializingEndpointMetrics - pluginLimits ocr3_1types.ReportingPluginLimits - publicConfig ocr3config.PublicConfig + chTelemetry chan<- *serialization.TelemetryWrapper + configDigest types.ConfigDigest + endpoint types.BinaryNetworkEndpoint2 + maxSigLen int + logger commontypes.Logger + metrics *serializingEndpointMetrics + pluginLimits ocr3_1types.ReportingPluginLimits + publicConfig 
ocr3config.PublicConfig + serializedLengthLimits limits.OCR3_1SerializedLengthLimits mutex sync.Mutex subprocesses subprocesses.Subprocesses @@ -49,6 +50,7 @@ func NewOCR3_1SerializingEndpoint[RI any]( metricsRegisterer prometheus.Registerer, pluginLimits ocr3_1types.ReportingPluginLimits, publicConfig ocr3config.PublicConfig, + serializedLengthLimits limits.OCR3_1SerializedLengthLimits, ) *OCR3_1SerializingEndpoint[RI] { return &OCR3_1SerializingEndpoint[RI]{ chTelemetry, @@ -59,6 +61,7 @@ func NewOCR3_1SerializingEndpoint[RI any]( newSerializingEndpointMetrics(metricsRegisterer, logger), pluginLimits, publicConfig, + serializedLengthLimits, sync.Mutex{}, subprocesses.Subprocesses{}, @@ -131,35 +134,52 @@ func (n *OCR3_1SerializingEndpoint[RI]) toOutboundBinaryMessage(msg protocol.Mes return types.OutboundBinaryMessagePlain{payload, types.BinaryMessagePriorityDefault}, pbm case protocol.MessageCertifiedCommit[RI]: return types.OutboundBinaryMessagePlain{payload, types.BinaryMessagePriorityDefault}, pbm + case protocol.MessageStateSyncSummary[RI]: + return types.OutboundBinaryMessagePlain{payload, types.BinaryMessagePriorityLow}, pbm case protocol.MessageBlockSyncRequest[RI]: return types.OutboundBinaryMessageRequest{ types.SingleUseSizedLimitedResponsePolicy{ - math.MaxInt, + n.serializedLengthLimits.MaxLenMsgBlockSyncResponse, time.Now().Add(protocol.DeltaMaxBlockSyncRequest), }, payload, types.BinaryMessagePriorityLow, }, pbm - case protocol.MessageBlockSync[RI]: + case protocol.MessageBlockSyncResponse[RI]: + return msg.RequestHandle.MakeResponse(payload), pbm + case protocol.MessageTreeSyncChunkRequest[RI]: + return types.OutboundBinaryMessageRequest{ + types.SingleUseSizedLimitedResponsePolicy{ + n.serializedLengthLimits.MaxLenMsgTreeSyncChunkResponse, + time.Now().Add(protocol.DeltaMaxTreeSyncRequest), + }, + payload, + types.BinaryMessagePriorityLow, + }, pbm + case protocol.MessageTreeSyncChunkResponse[RI]: return msg.RequestHandle.MakeResponse(payload), 
pbm - case protocol.MessageBlockSyncSummary[RI]: - return types.OutboundBinaryMessagePlain{payload, types.BinaryMessagePriorityLow}, pbm - case protocol.MessageBlobOffer[RI]: - return types.OutboundBinaryMessagePlain{payload, types.BinaryMessagePriorityDefault}, pbm + return types.OutboundBinaryMessageRequest{ + types.SingleUseSizedLimitedResponsePolicy{ + n.serializedLengthLimits.MaxLenMsgBlobOfferResponse, + msg.RequestInfo.ExpiryTimestamp, + }, + payload, + types.BinaryMessagePriorityDefault, + }, pbm case protocol.MessageBlobChunkRequest[RI]: return types.OutboundBinaryMessageRequest{ types.SingleUseSizedLimitedResponsePolicy{ - math.MaxInt, - time.Now().Add(protocol.DeltaBlobChunkRequestTimeout), + n.serializedLengthLimits.MaxLenMsgBlobChunkResponse, + msg.RequestInfo.ExpiryTimestamp, }, payload, types.BinaryMessagePriorityDefault, }, pbm case protocol.MessageBlobChunkResponse[RI]: return msg.RequestHandle.MakeResponse(payload), pbm - case protocol.MessageBlobAvailable[RI]: - return types.OutboundBinaryMessagePlain{payload, types.BinaryMessagePriorityDefault}, pbm + case protocol.MessageBlobOfferResponse[RI]: + return msg.RequestHandle.MakeResponse(payload), pbm } panic("unreachable") @@ -235,22 +255,29 @@ func (n *OCR3_1SerializingEndpoint[RI]) fromInboundBinaryMessage(inboundBinaryMe if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageRequest); !ok || ibm.Priority != types.BinaryMessagePriorityLow { return protocol.MessageBlockSyncRequest[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageBlockSyncRequest") } - case protocol.MessageBlockSync[RI]: + case protocol.MessageBlockSyncResponse[RI]: if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageResponse); !ok || ibm.Priority != types.BinaryMessagePriorityLow { - return protocol.MessageBlockSync[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageBlockSync") + return protocol.MessageBlockSyncResponse[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageBlockSync") } - case 
protocol.MessageBlockSyncSummary[RI]: + case protocol.MessageStateSyncSummary[RI]: if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessagePlain); !ok || ibm.Priority != types.BinaryMessagePriorityLow { - return protocol.MessageBlockSyncSummary[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageBlockSyncSummary") + return protocol.MessageStateSyncSummary[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageStateSyncSummary") + } + case protocol.MessageTreeSyncChunkRequest[RI]: + if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageRequest); !ok || ibm.Priority != types.BinaryMessagePriorityLow { + return protocol.MessageTreeSyncChunkRequest[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageTreeSyncRequest") + } + case protocol.MessageTreeSyncChunkResponse[RI]: + if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageResponse); !ok || ibm.Priority != types.BinaryMessagePriorityLow { + return protocol.MessageTreeSyncChunkResponse[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageTreeSyncChunk") } - case protocol.MessageBlobOffer[RI]: - if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessagePlain); !ok || ibm.Priority != types.BinaryMessagePriorityDefault { + if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageRequest); !ok || ibm.Priority != types.BinaryMessagePriorityDefault { return protocol.MessageBlobOffer[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageBlobOffer") } - case protocol.MessageBlobAvailable[RI]: - if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessagePlain); !ok || ibm.Priority != types.BinaryMessagePriorityDefault { - return protocol.MessageBlobAvailable[RI]{}, pbm, fmt.Errorf("wrong type or priority for MessageBlobAvailable") + case protocol.MessageBlobOfferResponse[RI]: + if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageResponse); !ok || ibm.Priority != types.BinaryMessagePriorityDefault { + return protocol.MessageBlobOfferResponse[RI]{}, pbm, 
fmt.Errorf("wrong type or priority for MessageBlobOfferResponse") } case protocol.MessageBlobChunkRequest[RI]: if ibm, ok := inboundBinaryMessage.(types.InboundBinaryMessageRequest); !ok || ibm.Priority != types.BinaryMessagePriorityDefault { diff --git a/offchainreporting2plus/keyvaluedatabase/badger_key_value_database.go b/offchainreporting2plus/keyvaluedatabase/badger_key_value_database.go deleted file mode 100644 index fded6287..00000000 --- a/offchainreporting2plus/keyvaluedatabase/badger_key_value_database.go +++ /dev/null @@ -1,180 +0,0 @@ -package keyvaluedatabase - -import ( - "bytes" - "errors" - "path" - - badger "github.com/dgraph-io/badger/v4" - - "github.com/smartcontractkit/libocr/internal/util" - "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" - "github.com/smartcontractkit/libocr/offchainreporting2plus/types" -) - -// NewBadgerKeyValueDatabaseFactory produces a -// [ocr3_1types.KeyValueDatabaseFactory] that creates [Badger] databases under -// the directory indicated by baseDir. The directory must exist and be -// writeable. NewKeyValueDatabase may fail if not. The factory requires -// exclusive control of the directory: external changes are forbidden. 
-// -// [Badger]: https://pkg.go.dev/github.com/dgraph-io/badger/v4 -func NewBadgerKeyValueDatabaseFactory(baseDir string) ocr3_1types.KeyValueDatabaseFactory { - return &badgerKeyValueDatabaseFactory{baseDir} -} - -type badgerKeyValueDatabaseFactory struct{ baseDir string } - -var _ ocr3_1types.KeyValueDatabaseFactory = &badgerKeyValueDatabaseFactory{} - -func (b *badgerKeyValueDatabaseFactory) NewKeyValueDatabase(configDigest types.ConfigDigest) (ocr3_1types.KeyValueDatabase, error) { - path := path.Join(b.baseDir, configDigest.String()) - db, err := badger.Open(badger.DefaultOptions(path)) - if err != nil { - return nil, err - } - return &badgerKeyValueDatabase{db}, nil -} - -type badgerKeyValueDatabase struct{ raw *badger.DB } - -var _ ocr3_1types.KeyValueDatabase = &badgerKeyValueDatabase{} - -func (b *badgerKeyValueDatabase) Close() error { - return b.raw.Close() -} - -func (b *badgerKeyValueDatabase) NewReadTransaction() (ocr3_1types.KeyValueReadTransaction, error) { - txn := b.raw.NewTransaction(false) - return &badgerReadWriteTransaction{txn}, nil // write funcs are type erased -} - -func (b *badgerKeyValueDatabase) NewReadWriteTransaction() (ocr3_1types.KeyValueReadWriteTransaction, error) { - txn := b.raw.NewTransaction(true) - return &badgerReadWriteTransaction{txn}, nil -} - -type badgerReadWriteTransaction struct{ view *badger.Txn } - -var _ ocr3_1types.KeyValueReadWriteTransaction = &badgerReadWriteTransaction{} -var _ ocr3_1types.KeyValueReadTransaction = &badgerReadWriteTransaction{} - -func (b *badgerReadWriteTransaction) Discard() { - b.view.Discard() -} - -func (b *badgerReadWriteTransaction) Read(key []byte) ([]byte, error) { - item, err := b.view.Get(key) - if err != nil { - if errors.Is(err, badger.ErrKeyNotFound) { - return nil, nil - } - return nil, err - } - val, err := item.ValueCopy(nil) - if err != nil { - return nil, err - } - return util.NilCoalesceSlice(val), nil -} - -type badgerRangeIterator struct { - hiKeyExcl []byte - - it 
*badger.Iterator - firstTime bool - - currentItem *badger.Item - currentItemKey []byte -} - -var _ ocr3_1types.KeyValueIterator = &badgerRangeIterator{} - -func (b *badgerRangeIterator) Close() error { - b.it.Close() - return nil -} - -func (b *badgerRangeIterator) Next() bool { - if b.firstTime { - b.firstTime = false - } else { - b.it.Next() - } - - if !b.it.Valid() { - return false - } - - item := b.it.Item() - key := item.KeyCopy(nil) - - if len(b.hiKeyExcl) > 0 && bytes.Compare(key, b.hiKeyExcl) >= 0 { - return false - } - - b.currentItem = item - b.currentItemKey = key - - return true -} - -func (b *badgerRangeIterator) Key() []byte { - return bytes.Clone(b.currentItemKey) -} - -func (b *badgerRangeIterator) Value() ([]byte, error) { - val, err := b.currentItem.ValueCopy(nil) - if err != nil { - return nil, err - } - return util.NilCoalesceSlice(val), nil -} - -func (b *badgerRangeIterator) Err() error { - return nil -} - -func (b *badgerReadWriteTransaction) Range(loKey []byte, hiKeyExcl []byte) ocr3_1types.KeyValueIterator { - loKey = bytes.Clone(loKey) - hiKeyExcl = bytes.Clone(hiKeyExcl) - - loKey = util.NilCoalesceSlice(loKey) - - opts := badger.DefaultIteratorOptions - opts.PrefetchValues = false // iterator offers values only on demand - opts.AllVersions = false // so that we don't have to check [badger.Item.IsDeletedOrExpired] - it := b.view.NewIterator(opts) - - it.Seek(loKey) - - return &badgerRangeIterator{ - hiKeyExcl, - it, - true, - nil, - nil, - } -} - -func (b *badgerReadWriteTransaction) Write(key, value []byte) error { - // Badger: The current transaction keeps a reference to the key and val byte - // slice arguments. Users must not modify key and val until the end of the - // transaction. 
- key = bytes.Clone(key) - value = bytes.Clone(value) - - return b.view.Set(key, util.NilCoalesceSlice(value)) -} - -func (b *badgerReadWriteTransaction) Delete(key []byte) error { - // Badger: The current transaction keeps a reference to the key byte slice - // argument. Users must not modify the key until the end of the transaction. - key = bytes.Clone(key) - - return b.view.Delete(key) -} - -func (b *badgerReadWriteTransaction) Commit() error { - return b.view.Commit() -} diff --git a/offchainreporting2plus/keyvaluedatabase/pebble_key_value_database.go b/offchainreporting2plus/keyvaluedatabase/pebble_key_value_database.go new file mode 100644 index 00000000..61148c32 --- /dev/null +++ b/offchainreporting2plus/keyvaluedatabase/pebble_key_value_database.go @@ -0,0 +1,260 @@ +package keyvaluedatabase + +import ( + "bytes" + "errors" + "fmt" + "path" + "sync" + + "github.com/cockroachdb/pebble" + + "github.com/smartcontractkit/libocr/internal/util" + "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3_1types" + "github.com/smartcontractkit/libocr/offchainreporting2plus/types" +) + +var ErrPebbleReadWriteTransactionAlreadyOpen = errors.New("a read-write transaction is already open") + +// NewPebbleKeyValueDatabaseFactory produces a +// [ocr3_1types.KeyValueDatabaseFactory] that creates Pebble databases under +// the directory indicated by baseDir. The directory must exist and be +// writeable. NewKeyValueDatabase may fail if not. The factory requires +// exclusive control of the directory: external changes are forbidden. 
+func NewPebbleKeyValueDatabaseFactory(baseDir string) ocr3_1types.KeyValueDatabaseFactory { + return &pebbleKeyValueDatabaseFactory{baseDir} +} + +type pebbleKeyValueDatabaseFactory struct{ baseDir string } + +var _ ocr3_1types.KeyValueDatabaseFactory = &pebbleKeyValueDatabaseFactory{} + +func (p *pebbleKeyValueDatabaseFactory) NewKeyValueDatabase(configDigest types.ConfigDigest) (ocr3_1types.KeyValueDatabase, error) { + dbPath := path.Join(p.baseDir, fmt.Sprintf("%s.db", configDigest.String())) + + db, err := pebble.Open(dbPath, nil) + if err != nil { + return nil, err + } + + return &pebbleKeyValueDatabase{ + db, + sync.Mutex{}, + sync.Once{}, + }, nil +} + +type pebbleKeyValueDatabase struct { + db *pebble.DB + + // This lock enforces that we can have at most one active committable + // read-write transaction open at any point in time. + rwSerializationLock sync.Mutex + closeOnce sync.Once +} + +var _ ocr3_1types.KeyValueDatabase = &pebbleKeyValueDatabase{} + +func (p *pebbleKeyValueDatabase) Close() error { + err := fmt.Errorf("database already closed") + p.closeOnce.Do(func() { + err = p.db.Close() + }) + return err +} + +// The resulting transaction is NOT thread-safe. + +func (p *pebbleKeyValueDatabase) NewReadTransaction() (ocr3_1types.KeyValueDatabaseReadTransaction, error) { + snapshot := p.db.NewSnapshot() + return &pebbleReadTransaction{ + snapshot, + false, + }, nil +} + +// The resulting transaction is NOT thread-safe. 
+ +func (p *pebbleKeyValueDatabase) NewReadWriteTransaction() (ocr3_1types.KeyValueDatabaseReadWriteTransaction, error) { + p.rwSerializationLock.Lock() + batch := p.db.NewIndexedBatch() + return &pebbleReadWriteTransaction{ + batch, + false, + func() { + p.rwSerializationLock.Unlock() + }, + }, nil +} + +type pebbleReadTransaction struct { + snapshot *pebble.Snapshot + discarded bool +} + +var _ ocr3_1types.KeyValueDatabaseReadTransaction = &pebbleReadTransaction{} + +func (p *pebbleReadTransaction) Discard() { + if p.discarded { + return + } + p.discarded = true + _ = p.snapshot.Close() +} + +func (p *pebbleReadTransaction) Read(key []byte) ([]byte, error) { + return readFromPebbleReader(p.snapshot, key) +} + +func (p *pebbleReadTransaction) Range(loKey []byte, hiKeyExcl []byte) ocr3_1types.KeyValueDatabaseIterator { + return newPebbleIterator(p.snapshot, loKey, hiKeyExcl) +} + +type pebbleReadWriteTransaction struct { + batch *pebble.Batch + + committedOrDiscarded bool + afterCommitOrDiscardFunc func() +} + +var _ ocr3_1types.KeyValueDatabaseReadWriteTransaction = &pebbleReadWriteTransaction{} +var _ ocr3_1types.KeyValueDatabaseReadTransaction = &pebbleReadWriteTransaction{} + +func (p *pebbleReadWriteTransaction) Discard() { + if p.committedOrDiscarded { + return + } + defer p.afterCommitOrDiscardFunc() + p.committedOrDiscarded = true + _ = p.batch.Close() +} + +func (p *pebbleReadWriteTransaction) Read(key []byte) ([]byte, error) { + return readFromPebbleReader(p.batch, key) +} + +func (p *pebbleReadWriteTransaction) Range(loKey []byte, hiKeyExcl []byte) ocr3_1types.KeyValueDatabaseIterator { + return newPebbleIterator(p.batch, loKey, hiKeyExcl) +} + +func (p *pebbleReadWriteTransaction) Write(key, value []byte) error { + return p.batch.Set(key, util.NilCoalesceSlice(value), nil) +} + +func (p *pebbleReadWriteTransaction) Delete(key []byte) error { + return p.batch.Delete(key, nil) +} + +func (p *pebbleReadWriteTransaction) Commit() error { + if 
p.committedOrDiscarded { + return fmt.Errorf("transaction has been committed or discarded") + } + defer p.afterCommitOrDiscardFunc() + p.committedOrDiscarded = true + return p.batch.Commit(nil) +} + +type pebbleIterator struct { + iter *pebble.Iterator + loKey []byte + hiKeyExcl []byte + err error + firstCall bool +} + +var _ ocr3_1types.KeyValueDatabaseIterator = &pebbleIterator{} + +func newPebbleIterator(reader pebble.Reader, loKey []byte, hiKeyExcl []byte) *pebbleIterator { + loKey = util.NilCoalesceSlice(bytes.Clone(loKey)) + hiKeyExcl = bytes.Clone(hiKeyExcl) + + opts := &pebble.IterOptions{ + LowerBound: loKey, + UpperBound: hiKeyExcl, + } + + errorneousPebbleIterator := func(err error) *pebbleIterator { + return &pebbleIterator{ + nil, + nil, + nil, + err, + true, + } + } + + iter, err := reader.NewIter(opts) + if err != nil { + return errorneousPebbleIterator(err) + } + + return &pebbleIterator{ + iter, + loKey, + hiKeyExcl, + nil, + true, + } +} + +func (p *pebbleIterator) Close() error { + if p.iter == nil { + return p.err + } + return p.iter.Close() +} + +func (p *pebbleIterator) Next() bool { + if p.iter == nil || p.err != nil { + return false + } + + if p.firstCall { + p.firstCall = false + if !p.iter.First() { + p.err = p.iter.Error() + return false + } + } else { + if !p.iter.Next() { + p.err = p.iter.Error() + return false + } + } + + return p.iter.Valid() +} + +func (p *pebbleIterator) Key() []byte { + if p.iter == nil { + return nil + } + return bytes.Clone(p.iter.Key()) +} + +func (p *pebbleIterator) Value() ([]byte, error) { + if p.iter == nil { + return nil, nil + } + return util.NilCoalesceSlice(bytes.Clone(p.iter.Value())), nil +} + +func (p *pebbleIterator) Err() error { + if p.iter == nil || p.err != nil { + return p.err + } + return p.iter.Error() +} + +func readFromPebbleReader(reader pebble.Reader, key []byte) ([]byte, error) { + value, closer, err := reader.Get(key) + if err != nil { + if errors.Is(err, pebble.ErrNotFound) { + return 
nil, nil + } else { + return nil, err + } + } + defer closer.Close() + return util.NilCoalesceSlice(bytes.Clone(value)), nil +} diff --git a/offchainreporting2plus/ocr3_1types/db.go b/offchainreporting2plus/ocr3_1types/db.go index 096a5525..e72850e3 100644 --- a/offchainreporting2plus/ocr3_1types/db.go +++ b/offchainreporting2plus/ocr3_1types/db.go @@ -1,27 +1,5 @@ package ocr3_1types -import ( - "context" +import "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" - "github.com/smartcontractkit/libocr/offchainreporting2plus/ocr3types" - "github.com/smartcontractkit/libocr/offchainreporting2plus/types" -) - -type Database interface { - ocr3types.Database - BlockDatabase -} - -// BlockDatabase persistently stores state transition blocks to support state transfer requests -// Expect Write to be called far more frequently than Read. -// -// All its functions should be thread-safe. - -type BlockDatabase interface { - // ReadBlock retrieves a block from the database. - // If the block is not found, ErrBlockNotFound should be returned. - ReadBlock(ctx context.Context, configDigest types.ConfigDigest, seqNr uint64) ([]byte, error) - // WriteBlock writes a block to the database. - // Writing with a nil value is the same as deleting. - WriteBlock(ctx context.Context, configDigest types.ConfigDigest, seqNr uint64, block []byte) error -} +type Database = ocr3types.Database diff --git a/offchainreporting2plus/ocr3_1types/kv.go b/offchainreporting2plus/ocr3_1types/kvdb.go similarity index 65% rename from offchainreporting2plus/ocr3_1types/kv.go rename to offchainreporting2plus/ocr3_1types/kvdb.go index 17448ca6..7ff7ecce 100644 --- a/offchainreporting2plus/ocr3_1types/kv.go +++ b/offchainreporting2plus/ocr3_1types/kvdb.go @@ -4,8 +4,11 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) -type KeyValueReadWriteTransaction interface { - KeyValueReadTransaction +// Deprecated: Use KeyValueDatabaseReadWriteTransaction instead. 
+type KeyValueReadWriteTransaction = KeyValueDatabaseReadWriteTransaction + +type KeyValueDatabaseReadWriteTransaction interface { + KeyValueDatabaseReadTransaction // A value of nil is interpreted as an empty slice, and does *not* delete // the key. For deletions you must use the Delete method. Write(key []byte, value []byte) error @@ -14,7 +17,10 @@ type KeyValueReadWriteTransaction interface { Commit() error } -type KeyValueReadTransaction interface { +// Deprecated: Use KeyValueDatabaseReadTransaction instead. +type KeyValueReadTransaction = KeyValueDatabaseReadTransaction + +type KeyValueDatabaseReadTransaction interface { // If the key exists, the returned value must not be nil! Read(key []byte) ([]byte, error) // Range iterates over the key-value pairs with keys in the range [loKey, @@ -25,11 +31,14 @@ type KeyValueReadTransaction interface { // // WARNING: DO NOT perform any writes/deletes to the key-value store while // the iterator is opened. - Range(loKey []byte, hiKeyExcl []byte) KeyValueIterator + Range(loKey []byte, hiKeyExcl []byte) KeyValueDatabaseIterator Discard() } -// KeyValueIterator is a iterator over key-value pairs, in ascending order of +// Deprecated: Use KeyValueDatabaseIterator instead. +type KeyValueIterator = KeyValueDatabaseIterator + +// KeyValueDatabaseIterator is a iterator over key-value pairs, in ascending order of // keys. // // Example usage: @@ -47,7 +56,7 @@ type KeyValueReadTransaction interface { // if err := it.Err(); err != nil { // // handle error // } -type KeyValueIterator interface { +type KeyValueDatabaseIterator interface { // Next prepares the next key-value pair for reading. It returns true on // success, or false if there is no next key-value pair or an error occurred // while preparing it. @@ -56,24 +65,24 @@ type KeyValueIterator interface { Key() []byte // Value returns the value of the current key-value pair. 
An error value // indicates a failure to retrieve the value, and the caller is responsible - // for handling it. Even if all errors are nil, [KeyValueIterator.Err] must + // for handling it. Even if all errors are nil, [KeyValueDatabaseIterator.Err] must // be checked after iteration is completed. Value() ([]byte, error) // Err returns any error encountered during iteration. Must be checked after // the end of the iteration, to ensure that no key-value pairs were missed - // due to iteration errors. Errors in [KeyValueIterator.Value] are distinct + // due to iteration errors. Errors in [KeyValueDatabaseIterator.Value] are distinct // and will not cause a non-nil error. Err() error // Close closes the iterator and releases any resources associated with it. - // Further iteration is prevented, i.e., [KeyValueIterator.Next] will return + // Further iteration is prevented, i.e., [KeyValueDatabaseIterator.Next] will return // false. Must be called in any case, even if the iteration encountered any - // error through [KeyValueIterator.Value] or [KeyValueIterator.Err]. + // error through [KeyValueDatabaseIterator.Value] or [KeyValueDatabaseIterator.Err]. 
Close() error } type KeyValueDatabase interface { - NewReadWriteTransaction() (KeyValueReadWriteTransaction, error) - NewReadTransaction() (KeyValueReadTransaction, error) + NewReadWriteTransaction() (KeyValueDatabaseReadWriteTransaction, error) + NewReadTransaction() (KeyValueDatabaseReadTransaction, error) Close() error } diff --git a/offchainreporting2plus/ocr3_1types/plugin.go b/offchainreporting2plus/ocr3_1types/plugin.go index 8938bc9d..aefc115f 100644 --- a/offchainreporting2plus/ocr3_1types/plugin.go +++ b/offchainreporting2plus/ocr3_1types/plugin.go @@ -7,6 +7,7 @@ import ( "github.com/smartcontractkit/libocr/offchainreporting2plus/types" ) +// The precursor value from which a list of [ocr3types.ReportPlus] is generated type ReportsPlusPrecursor []byte type ReportingPluginFactory[RI any] interface { @@ -20,25 +21,37 @@ type ReportingPluginFactory[RI any] interface { ) (ReportingPlugin[RI], ReportingPluginInfo, error) } -type KeyValueReader interface { +// Deprecated: Use KeyValueStateReader instead. +type KeyValueReader = KeyValueStateReader + +// Provides read access to the replicated KeyValueState. +type KeyValueStateReader interface { // A return value of nil indicates that the key does not exist. Read(key []byte) ([]byte, error) } -type KeyValueReadWriter interface { - KeyValueReader +// Deprecated: Use KeyValueStateReadWriter instead. +type KeyValueReadWriter = KeyValueStateReadWriter + +// Provides read and write access to the replicated KeyValueState. +type KeyValueStateReadWriter interface { + KeyValueStateReader Write(key []byte, value []byte) error Delete(key []byte) error } -// A ReportingPlugin allows plugging custom logic into the OCR3 protocol. The +// # Overview +// +// A ReportingPlugin allows plugging custom logic into the OCR protocol. The // OCR protocol handles cryptography, networking, ensuring that a sufficient // number of nodes is in agreement about any report, transmitting the report to -// the contract, etc... 
The ReportingPlugin handles application-specific logic. +// the contract, etc. The ReportingPlugin handles application-specific logic. // To do so, the ReportingPlugin defines a number of callbacks that are called // by the OCR protocol logic at certain points in the protocol's execution flow. // The report generated by the ReportingPlugin must be in a format understood by -// contract that the reports are transmitted to. +// contract (or offchain target system) that the reports are transmitted to. +// +// # Byzantine Fault Tolerance // // We assume that each correct node participating in the protocol instance will // be running the same ReportingPlugin implementation. However, not all nodes @@ -46,9 +59,11 @@ type KeyValueReadWriter interface { // faults). For example, faulty nodes could be down, have intermittent // connectivity issues, send garbage messages, or be controlled by an adversary. // +// # Execution Flow +// // For a protocol round where everything is working correctly, follower oracles // will call Observation, ValidateObservation, ObservationQuorum, StateTransition, -// and Reports. The leader oracle will additionally call Query at the beginning of +// Committed, and Reports. The leader oracle will additionally call Query at the beginning of // the round. For each report, ShouldAcceptAttestedReport will be called, iff // the oracle is in the set of transmitters for the report. If // ShouldAcceptAttestedReport returns true, ShouldTransmitAcceptedReport will be @@ -56,21 +71,19 @@ type KeyValueReadWriter interface { // faults occur. // // In particular, an ReportingPlugin must deal with cases where: +// - only a subset of the functions on the ReportingPlugin are invoked for a +// given round +// - the observation returned by Observation is not included in the list of +// AttributedObservations passed to StateTransition +// - a [Query] or [Observation] is malformed. 
(For defense in depth, it is also +// recommended that malformed elements in the key value state are handled gracefully.) +// - instances of the ReportingPlugin run by different oracles have different +// call traces. E.g., the ReportingPlugin's Observation function may have been +// invoked on node A, but not on node B. // -// - only a subset of the functions on the ReportingPlugin are invoked for a -// given round -// -// - the observation returned by Observation is not included in the list of -// AttributedObservations passed to StateTransition +// # Engineering requirements for ReportingPlugin implementations // -// - a query or observation is malformed. (For defense in depth, it is also -// recommended that malformed outcomes are handled gracefully.) -// -// - instances of the ReportingPlugin run by different oracles have different -// call traces. E.g., the ReportingPlugin's Observation function may have been -// invoked on node A, but not on node B. -// -// All functions on an ReportingPlugin should be thread-safe. +// All functions on an ReportingPlugin must be thread-safe. // // The execution of the functions in the ReportingPlugin is on the critical path // of the protocol's execution. A blocking function may block the oracle from @@ -82,14 +95,31 @@ type KeyValueReadWriter interface { // the documentation on ocr3config.PublicConfig for more information on how // to configure timeouts. // +// Many functions on the ReportingPlugin are marked as pure‡. This is because they may be +// invoked in arbitrary order, and because they must return deterministic results in order +// to ensure agreement across different oracles. This means that +// they must act as [pure functions], with a few important clarifications and exceptions: +// - In cases where the function returns an error, the determinism requirement is lifted. (But beware: if too many +// oracles encounter errors, the protocol may fail to progress.) 
+// - Side effects arising from calling methods on a [KeyValueStateReader] argument are allowed. +// - Side effects arising from calling methods on a [KeyValueStateReadWriter] argument are allowed. +// - Side effects arising from calling methods on a [BlobFetcher] argument are allowed. +// - Side effects arising from calling methods on a [BlobBroadcastFetcher] argument are allowed. +// - Unobservable side effects (e.g. memoization of expensive computations) are allowed. Be careful! +// - In cases where the function does not return an error, the [context.Context] argument must +// not affect the observable behavior of the function. Note that this does not preclude +// a function from returning an error on context expiration. +// // For a given OCR protocol instance, there can be many (consecutive) instances // of an ReportingPlugin, e.g. due to software restarts. If you need -// ReportingPlugin state to survive across restarts, you should -// persist it in the key-value store. A ReportingPlugin instance will only ever serve a +// ReportingPlugin state to survive across restarts, you should probably +// persist it in the KeyValueState. A ReportingPlugin instance will only ever serve a // single protocol instance. State is not preserved between protocol instances. // A fresh protocol instance will start with a clean state. // Carrying state between different protocol instances is up to the // ReportingPlugin logic. +// +// [pure functions]: https://en.wikipedia.org/wiki/Pure_function type ReportingPlugin[RI any] interface { // Query creates a Query that is sent from the leader to all follower nodes // as part of the request for an observation. Be careful! A malicious leader @@ -103,9 +133,24 @@ type ReportingPlugin[RI any] interface { // You may assume that the seqNr is increasing strictly monotonically // across the lifetime of a protocol instance. 
// - // The KeyValueReader gives read access to the key-value store in the state - // that it is after seqNr - 1 is committed. - Query(ctx context.Context, seqNr uint64, keyValueReader KeyValueReader, blobBroadcastFetcher BlobBroadcastFetcher) (types.Query, error) + // The keyValueStateReader gives read access to the replicated KeyValueState + // at the point after seqNr - 1 is committed. It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + // + // The blobBroadcastFetcher enables broadcasting and fetching blobs. Broadcasting blobs + // can be a more efficient data dissemination method than direct use of Query/Observation + // when data is larger. It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted + // at this function and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + Query( + ctx context.Context, + seqNr uint64, + keyValueStateReader KeyValueStateReader, + blobBroadcastFetcher BlobBroadcastFetcher, + ) (types.Query, error) // Observation gets an observation from the underlying data source. Returns // a value or an error. @@ -113,26 +158,57 @@ type ReportingPlugin[RI any] interface { // You may assume that the seqNr is increasing strictly monotonically // across the lifetime of a protocol instance. // - // The KeyValueReader gives read access to the key-value store in the state - // that it is after seqNr - 1 is committed. - Observation(ctx context.Context, seqNr uint64, aq types.AttributedQuery, keyValueReader KeyValueReader, blobBroadcastFetcher BlobBroadcastFetcher) (types.Observation, error) + // The keyValueStateReader gives read access to the replicated KeyValueState + // at the point after seqNr - 1 is committed. 
It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + // + // The blobBroadcastFetcher enables broadcasting and fetching blobs. Broadcasting blobs + // can be a more efficient data dissemination method than direct use of Query/Observation + // when data is larger. It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted + // at this function and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + Observation( + ctx context.Context, + seqNr uint64, + aq types.AttributedQuery, + keyValueStateReader KeyValueStateReader, + blobBroadcastFetcher BlobBroadcastFetcher, + ) (types.Observation, error) // ValidateObservation should return an error if an observation isn't well-formed. - // Non-well-formed observations will be discarded by the protocol. This - // function should be pure. This is called for each observation, don't do - // anything slow in here. + // Non-well-formed observations will be discarded by the protocol. + // + // This function must be pure‡. // // You may assume that the seqNr is increasing strictly monotonically // across the lifetime of a protocol instance. // - // The KeyValueReader gives read access to the key-value store in the state - // that it is after seqNr - 1 is committed. - ValidateObservation(ctx context.Context, seqNr uint64, aq types.AttributedQuery, ao types.AttributedObservation, keyValueReader KeyValueReader, blobFetcher BlobFetcher) error + // The keyValueStateReader gives read access to the replicated KeyValueState + // at the point after seqNr - 1 is committed. It must not be used outside the execution + // of this function. 
(It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + // + // The blobFetcher enables fetching blobs. It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + ValidateObservation( + ctx context.Context, + seqNr uint64, + aq types.AttributedQuery, + ao types.AttributedObservation, + keyValueStateReader KeyValueStateReader, + blobFetcher BlobFetcher, + ) error // ObservationQuorum indicates whether the provided valid (according to // ValidateObservation) observations are sufficient to construct an outcome. // - // This function should be pure. Don't do anything slow in here. + // This function must be pure‡. // // This is an advanced feature. The "default" approach (what OCR1 & OCR2 // did) is to have this function call @@ -144,17 +220,31 @@ type ReportingPlugin[RI any] interface { // it returns true for aos, it must also return true for any // superset of aos. // - // The KeyValueReader gives read access to the key-value store in the state - // that it is after seqNr - 1 is committed. - ObservationQuorum(ctx context.Context, seqNr uint64, aq types.AttributedQuery, aos []types.AttributedObservation, keyValueReader KeyValueReader, blobFetcher BlobFetcher) (quorumReached bool, err error) + // The keyValueStateReader gives read access to the replicated KeyValueState + // at the point after seqNr - 1 is committed. It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + // + // The blobFetcher enables fetching blobs. 
It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + ObservationQuorum( + ctx context.Context, + seqNr uint64, + aq types.AttributedQuery, + aos []types.AttributedObservation, + keyValueStateReader KeyValueStateReader, + blobFetcher BlobFetcher, + ) (quorumReached bool, err error) // StateTransition modifies the state of the Reporting Plugin, based on // the attributed query and the set of attributed observations of the round. // Generates ReportsPlusPrecursor, which encodes a possibly empty list of - // reports, as a side effect. - // - // This function should be pure. Don't do anything slow in here. + // reports. // + // This function must be pure‡. // // You may assume that the seqNr is increasing strictly monotonically // across the lifetime of a protocol instance. @@ -163,25 +253,53 @@ type ReportingPlugin[RI any] interface { // (1) validated by ValidateObservation on each element, and (2) checked // by ObservationQuorum to have reached quorum. // - // The KeyValueReadWriter gives read and write access to the key-value store in the state - // that it is after seqNr - 1 is committed. - StateTransition(ctx context.Context, seqNr uint64, aq types.AttributedQuery, aos []types.AttributedObservation, keyValueReadWriter KeyValueReadWriter, blobFetcher BlobFetcher) (ReportsPlusPrecursor, error) + // The keyValueStateReadWriter gives read and write access to the replicated KeyValueState + // from the point after seqNr - 1 is committed. Writing to the keyValueStateReadWriter allows + // a ReportingPlugin to modify the replicated KeyValueState. The keyValueStateReadWriter must not be + // used outside the execution of this function. 
(It's okay to use it anywhere in the call tree rooted + // at this function and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + // + // The blobFetcher enables fetching blobs. It must not be used outside the execution + // of this function. (It's okay to use it anywhere in the call tree rooted at this function + // and to pass it to separate goroutines, but they must stop using it before this + // function returns.) + StateTransition( + ctx context.Context, + seqNr uint64, + aq types.AttributedQuery, + aos []types.AttributedObservation, + keyValueStateReadWriter KeyValueStateReadWriter, + blobFetcher BlobFetcher, + ) (ReportsPlusPrecursor, error) - // Committed notifies the plugin that a sequence number has been committed. - // It might or might not be preceded by a StateTransition call for the same - // sequence number. - // TODO: This function is not called by the protocol yet. - Committed(ctx context.Context, seqNr uint64, keyValueReader KeyValueReader) error + // Committed notifies the plugin that seqNr has been committed. + // It might not be called for all sequence numbers. + // This method return an error != nil will not lead to the round being aborted/not committed. + // + // Don't do anything slow in here. + // + // The KeyValueReader gives read access to the key-value store in the state + // that it is after the StateTransition for seqNr is computed. + Committed( + ctx context.Context, + seqNr uint64, + keyValueStateReader KeyValueStateReader, + ) error // Reports generates a (possibly empty) list of reports from a ReportsPlusPrecursor. Each report // will be signed and possibly be transmitted to the contract. (Depending on // ShouldAcceptAttestedReport & ShouldTransmitAcceptedReport) // - // This function should be pure. Don't do anything slow in here. + // This function must be pure‡. // // This is likely to change in the future. 
It will likely be returning a // list of report batches, where each batch goes into its own Merkle tree. - Reports(ctx context.Context, seqNr uint64, reportsPlusPrecursor ReportsPlusPrecursor) ([]ocr3types.ReportPlus[RI], error) + Reports( + ctx context.Context, + seqNr uint64, + reportsPlusPrecursor ReportsPlusPrecursor, + ) ([]ocr3types.ReportPlus[RI], error) // ShouldAcceptAttestedReport decides whether a report should be accepted for transmission. // Any report passed to this function will have been attested, i.e. signed by f+1 @@ -189,11 +307,15 @@ type ReportingPlugin[RI any] interface { // // Don't make assumptions about the seqNr order in which this function // is called. - ShouldAcceptAttestedReport(ctx context.Context, seqNr uint64, reportWithInfo ocr3types.ReportWithInfo[RI]) (bool, error) + ShouldAcceptAttestedReport( + ctx context.Context, + seqNr uint64, + reportWithInfo ocr3types.ReportWithInfo[RI], + ) (bool, error) // ShouldTransmitAcceptedReport decides whether the given report should actually // be broadcast to the contract. This is invoked just before the broadcast occurs. - // Any report passed to this function will have been signed by a quorum of oracle + // Any report passed to this function will have been signed by a quorum of oracles // and been accepted by ShouldAcceptAttestedReport. // // Don't make assumptions about the seqNr order in which this function @@ -205,7 +327,11 @@ type ReportingPlugin[RI any] interface { // database upon oracle restart, this function may be called with reports // that no other function of this instance of this interface has ever // been invoked on. - ShouldTransmitAcceptedReport(ctx context.Context, seqNr uint64, reportWithInfo ocr3types.ReportWithInfo[RI]) (bool, error) + ShouldTransmitAcceptedReport( + ctx context.Context, + seqNr uint64, + reportWithInfo ocr3types.ReportWithInfo[RI], + ) (bool, error) // If Close is called a second time, it may return an error but must not // panic. 
This will always be called when a plugin is no longer diff --git a/offchainreporting2plus/types/types.go b/offchainreporting2plus/types/types.go index 3905d7f7..4f787186 100644 --- a/offchainreporting2plus/types/types.go +++ b/offchainreporting2plus/types/types.go @@ -424,6 +424,12 @@ type ConfigEncryptionPublicKey [curve25519.PointSize]byte // X25519 type OffchainKeyring interface { // OffchainSign returns an EdDSA-Ed25519 signature on msg produced using the // standard library's ed25519.Sign function. + // + // For domain separation between different applications, the recommended pattern is to + // format msg as: || <32 byte hash of application-specific data> + // For example, OCR3 uses "ocr3" || sha256(). + // OCR2 confirms to the pattern if you squint: it uses a zero-length application-specific domain separator. ;-) + OffchainSign(msg []byte) (signature []byte, err error) // ConfigDiffieHellman multiplies point with the secret key (i.e. scalar) diff --git a/ragep2p/ragep2p.go b/ragep2p/ragep2p.go index 30787f44..924c1f2d 100644 --- a/ragep2p/ragep2p.go +++ b/ragep2p/ragep2p.go @@ -16,6 +16,7 @@ import ( "github.com/smartcontractkit/libocr/commontypes" "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" "github.com/smartcontractkit/libocr/ragep2p/internal/knock" "github.com/smartcontractkit/libocr/ragep2p/internal/msgbuf" "github.com/smartcontractkit/libocr/ragep2p/internal/mtls" @@ -26,13 +27,13 @@ import ( ) // Maximum number of streams with another peer that can be opened on a host -const MaxStreamsPerPeer = 2_000 +const MaxStreamsPerPeer = types.MaxStreamsPerPeer // Maximum stream name length -const MaxStreamNameLength = 256 +const MaxStreamNameLength = types.MaxStreamNameLength // Maximum length of messages sent with ragep2p -const MaxMessageLength = 1024 * 1024 * 1024 // 1 GiB. 
This must be smaller than INT32_MAX +const MaxMessageLength = types.MaxMessageLength const newConnTokens = MaxStreamsPerPeer * (frameHeaderEncodedSize + MaxStreamNameLength) @@ -254,7 +255,7 @@ func (ho *Host) Start() error { }) } - err := ho.discoverer.Start(ho, ho.keyring, ho.logger) + err := ho.discoverer.Start(Wrapped(ho), ho.keyring, ho.logger) if err != nil { return fmt.Errorf("failed to start discoverer: %w", err) } @@ -875,12 +876,7 @@ func (ho *Host) handleConnection(incoming bool, rlConn *ratelimitedconn.RateLimi } } -// TokenBucketParams contains the two parameters for a token bucket rate -// limiter. -type TokenBucketParams struct { - Rate float64 - Capacity uint32 -} +type TokenBucketParams = types.TokenBucketParams // NewStream creates a new bidirectional stream with peer other for streamName. // It is parameterized with a maxMessageLength, the maximum size of a message in @@ -1532,7 +1528,7 @@ func incomingConnsRateLimit(durationBetweenDials time.Duration) ratelimit.Millit // Discoverer is responsible for discovering the addresses of peers on the network. type Discoverer interface { - Start(host *Host, keyring types.PeerKeyring, logger loghelper.LoggerWithContext) error + Start(host ragep2pwrapper.Host, keyring types.PeerKeyring, logger loghelper.LoggerWithContext) error Close() error FindPeer(peer types.PeerID) ([]types.Address, error) } diff --git a/ragep2p/ragep2pnew/doc.go b/ragep2p/ragep2pnew/doc.go new file mode 100644 index 00000000..77536e2e --- /dev/null +++ b/ragep2p/ragep2pnew/doc.go @@ -0,0 +1,127 @@ +// ragep2p is a simple p2p networking library that provides best-effort, +// self-healing, message-oriented, authenticated, encrypted bidirectional +// communication streams between peers. +// +// # Concepts +// +// ragep2p peers are identified by their PeerID. PeerIDs are public keys. +// PeerIDs' string representation is compatible with libp2p to ease migration +// from libp2p to ragep2p. 
+
+// ragep2p provides Host and Stream abstractions.
+//
+// A Host allows users to establish Streams with other peers identified by their
+// PeerID. The host will transparently handle peer discovery, secure connection
+// (re)establishment, multiplexing streams over the connection and rate
+// limiting.
+//
+// A Stream allows users to send binary messages to another peer. Messages are
+// delivered on a best-effort basis. If the underlying connection to the other
+// peer drops or isn't fast enough or the other peer has not opened a
+// corresponding stream or ..., messages may get dropped. Streams are
+// self-healing: users don't need to close and re-open streams even if the
+// underlying connection drops or the other peer becomes unavailable. We
+// guarantee that messages that are delivered are delivered in FIFO order and
+// without modifications.
+//
+// # Peer discovery
+//
+// ragep2p will handle peer discovery (i.e. associating network addresses like
+// 1.2.3.4:1337 with PeerIDs) automatically. Upon construction of a Host, a
+// Discoverer is passed in, which is then used by the Host for this purpose.
+//
+// If multiple network addresses are discovered for a PeerID, ragep2p will try
+// sequentially dialing all of them until a connection is successfully
+// established.
+//
+// # Thread Safety
+//
+// All public functions on Host and Stream are thread-safe.
+//
+// It is safe to double-Close(), though all but the first Close() may return an
+// error.
+//
+// # Allocations
+//
+// We allocate a buffer for each message received. In principle, this could allow
+// an adversary to force a recipient to run out of memory. To defend against
+// this, we put limits on the length of messages and rate limit messages,
+// thereby also limiting adversarially-controlled allocations.
+//
+// # Security
+//
+// Note: Users don't need to care about the details of how these security
+// measures work, only what properties they provide. 
+
+// ragep2p's security model assumes that all Streams on the local Host behave
+// honestly and cooperatively. Since many Streams are multiplexed over a single
+// connection, a single "bad" Stream could completely exhaust the entire
+// connection preventing other Streams from delivering messages as well. Other
+// network participants, however, are not assumed to behave honestly and we
+// attempt to defend against fingerprinting, impersonation, MitM, resource
+// exhaustion, tarpitting, etc.
+//
+// ragep2p uses the Ed25519 signature algorithm and sha2 hash function.
+//
+// ragep2p attempts to prevent fingerprinting of ragep2p nodes. ragep2p will not
+// respond on a connection until it has received a valid knock message
+// constructed over the PeerID of the connection initiator and the PeerID of the
+// connection receiver. (knocks carry a signature for authentication, though
+// it's important to note that by their uni-directional nature a knock does not
+// constitute a proper handshake and can be replayed.)
+//
+// ragep2p connections are authenticated and encrypted using mutual TLS 1.3,
+// using the crypto/tls package from Go's standard library. TLS is used with
+// ephemeral certificates using the keypair corresponding to the Host's PeerID.
+//
+// ragep2p tries to defend against resource exhaustion attacks. In particular,
+// we enforce maximum Stream counts per peer, maximum lengths for various
+// messages, apply rate limiting at the tcp connection level as well as at the
+// individual Stream level, and have a constant bound on the number of buffered
+// messages per Stream.
+//
+// ragep2p defends against tarpitting, i.e. other peers that intentionally
+// read/write from the underlying connection slowly. Host.NewStream(),
+// Stream.Close(), Stream.SendMessage(), and Stream.Receive() return immediately
+// and do any potential resulting communication asynchronously in the
+// background. Host.Close() terminates after at most a few seconds. 
+// +// # Metrics +// +// ragep2p exposes prometheus metrics. Their names are prefixed with "ragep2p_". +// The audience for these metrics are operators of software using ragep2p and +// developers building on top of ragep2p. +// +// # Suggested Health Checks +// +// The following example health checks in PromQL enable operators to monitor +// connectivity to remote peers and health of connections with them. We +// suggest monitoring *all* of these metrics. These examples only serve as a +// starting point, operators are encouraged to adjust thresholds empirically +// and come up with more sophisticated queries. +// +// Is my ragep2p host receiving data from the remote peer? If not, there is a +// connectivity problem. +// +// rate(ragep2p_peer_conn_read_processed_bytes_total[5m]) > 0 +// +// Is my ragep2p host sending data to the remote peer? If not, there is a +// connectivity problem. +// +// rate(ragep2p_peer_conn_written_bytes_total[5m]) > 0 +// +// Is connection churn reasonable (e.g. less than one new connection every 3m)? +// If not, this may point to infrastructure or ISP problems. +// +// rate(ragep2p_peer_conn_established_total[10m]) < 1/(3 * 60) +// +// Is my ragep2p host not skipping any data received from the remote peer? if +// not, this may point to bugs in the software running on top of ragep2p. +// +// rate(ragep2p_peer_conn_read_skipped_bytes_total[5m]) == 0 +// +// Is my ragep2p host receiving inbound dials at all? If not, this may point +// to a misconfiguration in my infrastructure. 
+// +// rate(ragep2p_host_inbound_dials_total[48h]) > 0 +package ragep2pnew diff --git a/ragep2p/ragep2pnew/internal/demuxer/demuxer.go b/ragep2p/ragep2pnew/internal/demuxer/demuxer.go new file mode 100644 index 00000000..9a38b01d --- /dev/null +++ b/ragep2p/ragep2pnew/internal/demuxer/demuxer.go @@ -0,0 +1,277 @@ +package demuxer + +import ( + "fmt" + "math" + "sync" + + "github.com/smartcontractkit/libocr/internal/ringbuffer" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/ratelimit" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/responselimit" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/stream2types" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +type ShouldPushResult int + +const ( + _ ShouldPushResult = iota + ShouldPushResultYes + ShouldPushResultMessageTooBig + ShouldPushResultMessagesLimitExceeded + ShouldPushResultBytesLimitExceeded + ShouldPushResultUnknownStream + ShouldPushResultResponseRejected +) + +type PushResult int + +const ( + _ PushResult = iota + PushResultSuccess + PushResultDropped + PushResultUnknownStream +) + +type PopResult int + +const ( + _ PopResult = iota + PopResultSuccess + PopResultEmpty + PopResultUnknownStream +) + +type demuxerStream struct { + buffer *ringbuffer.RingBuffer[stream2types.InboundBinaryMessage] + chSignal chan struct{} + maxMessageSize int + messagesLimiter ratelimit.TokenBucket + bytesLimiter ratelimit.TokenBucket +} + +// Demuxer helps, on the receiving side of a connection, to demux the arriving messages +// into the correct streams. In the process, it checks rate limits. 
+type Demuxer struct { + mutex sync.Mutex + streams map[internaltypes.StreamID]*demuxerStream + responseChecker *responselimit.ResponseChecker +} + +func NewDemuxer() *Demuxer { + return &Demuxer{ + sync.Mutex{}, + map[internaltypes.StreamID]*demuxerStream{}, + responselimit.NewResponseChecker(), + } +} + +func makeTokenBucket(params types.TokenBucketParams) ratelimit.TokenBucket { + tb := ratelimit.TokenBucket{} + tb.SetRate(ratelimit.MillitokensPerSecond(math.Ceil(params.Rate * 1000))) + tb.SetCapacity(params.Capacity) + tb.AddTokens(params.Capacity) + return tb +} + +func updateTokenBucket(bucket *ratelimit.TokenBucket, params types.TokenBucketParams) { + bucket.SetRate(ratelimit.MillitokensPerSecond(math.Ceil(params.Rate * 1000))) + bucket.SetCapacity(params.Capacity) +} + +// panics if incomingBufferSize is 0 or less +func (d *Demuxer) AddStream( + sid internaltypes.StreamID, + maxIncomingBufferedMessages int, + maxMessageSize int, + messagesLimit types.TokenBucketParams, + bytesLimit types.TokenBucketParams, +) bool { + d.mutex.Lock() + defer d.mutex.Unlock() + + if _, ok := d.streams[sid]; ok { + return false + } + + d.streams[sid] = &demuxerStream{ + ringbuffer.NewRingBuffer[stream2types.InboundBinaryMessage](maxIncomingBufferedMessages), + make(chan struct{}, 1), + maxMessageSize, + makeTokenBucket(messagesLimit), + makeTokenBucket(bytesLimit), + } + return true +} + +func (d *Demuxer) UpdateStream( + sid internaltypes.StreamID, + maxIncomingBufferedMessages int, + maxMessageSize int, + messagesLimit types.TokenBucketParams, + bytesLimit types.TokenBucketParams, +) bool { + + d.mutex.Lock() + defer d.mutex.Unlock() + + s, ok := d.streams[sid] + if !ok { + return false + } + + if maxIncomingBufferedMessages <= 0 { + return false + } + + s.buffer.SetCap(maxIncomingBufferedMessages) + s.maxMessageSize = maxMessageSize + updateTokenBucket(&s.messagesLimiter, messagesLimit) + updateTokenBucket(&s.bytesLimiter, bytesLimit) + + return true +} + +func (d *Demuxer) 
RemoveStream(sid internaltypes.StreamID) { + d.mutex.Lock() + defer d.mutex.Unlock() + + delete(d.streams, sid) + d.responseChecker.ClearPoliciesForStream(sid) +} + +func (d *Demuxer) SetPolicy(sid internaltypes.StreamID, rid internaltypes.RequestID, policy responselimit.ResponsePolicy) { + // reponseChecker.SetPolicy is threadsafe, no need to acquire d.mutex + d.responseChecker.SetPolicy(sid, rid, policy) +} + +func (d *Demuxer) ShouldPush(sid internaltypes.StreamID, size int) ShouldPushResult { + d.mutex.Lock() + defer d.mutex.Unlock() + + s, ok := d.streams[sid] + if !ok { + return ShouldPushResultUnknownStream + } + + if size > s.maxMessageSize { + return ShouldPushResultMessageTooBig + } + + messagesLimiterAllow := s.messagesLimiter.RemoveTokens(1) + bytesLimiterAllow := s.bytesLimiter.RemoveTokens(uint32(size)) + + if !messagesLimiterAllow { + return ShouldPushResultMessagesLimitExceeded + } + + if !bytesLimiterAllow { + return ShouldPushResultBytesLimitExceeded + } + + return ShouldPushResultYes +} + +func (d *Demuxer) ShouldPushResponse(sid internaltypes.StreamID, rid internaltypes.RequestID, size int) ShouldPushResult { + d.mutex.Lock() + defer d.mutex.Unlock() + + _, ok := d.streams[sid] + if !ok { + return ShouldPushResultUnknownStream + } + + checkResult := d.responseChecker.CheckResponse(sid, rid, size) + switch checkResult { + case responselimit.ResponseCheckResultReject: + return ShouldPushResultResponseRejected + case responselimit.ResponseCheckResultAllow: + return ShouldPushResultYes + } + + // The above switch should be exhaustive. If it is not the linter is expected to catch this. 
+ panic(fmt.Sprintf("unexpected ragep2p.responseCheckResult: %#v", checkResult)) +} + +func (d *Demuxer) PushMessage(sid internaltypes.StreamID, msg stream2types.InboundBinaryMessage) PushResult { + d.mutex.Lock() + defer d.mutex.Unlock() + + s, ok := d.streams[sid] + if !ok { + return PushResultUnknownStream + } + + result := PushResultSuccess + if _, evicted := s.buffer.PushEvict(msg); evicted { + result = PushResultDropped + } + + select { + case s.chSignal <- struct{}{}: + default: + } + + return result +} + +// Pops a message from the underlying stream's buffer. +// Returns a non-nil value iff popResult == popResultSuccess. +func (d *Demuxer) PopMessage(sid internaltypes.StreamID) (stream2types.InboundBinaryMessage, PopResult) { + d.mutex.Lock() + defer d.mutex.Unlock() + + s, ok := d.streams[sid] + if !ok { + return nil, PopResultUnknownStream + } + + result, ok := s.buffer.Pop() + if !ok { + return nil, PopResultEmpty + } + + if !s.buffer.IsEmpty() { + select { + case s.chSignal <- struct{}{}: + default: + } + } + + return result, PopResultSuccess +} + +// The signals received via the returned channel are NOT a reliable indicator that the buffer is NON-empty. Depending on +// the exact interleaving of the goroutines (in particular, authenticatedConnectionReadLoop, and receiveLoop), a call +// to PopMessage() - after receiving a signal through the channel - may return (nil, popResultEmpty). 
+// +// Example execution timeline for a buffer size of 1: +// +// | authenticatedConnectionReadLoop buffer receiveLoop +// | [] +// | demux.PushMessage(m1) +// | [m1] +// | send signal to s.chSignal +// | signal received (case <-chSignalMaybePending triggers) +// | demux.PushMessage(m2), buffer +// | overflows and m1 is dropped +// | [m2] +// | demux.PopMessage() returns (m2, popResultSuccess) +// | [] +// | send signal to s.chSignal +// | signal received (case <-chSignalMaybePending triggers) +// | demux.PopMessage() returns (nil, popResultEmpty) +// ▼ +// time +func (d *Demuxer) SignalMaybePending(sid internaltypes.StreamID) <-chan struct{} { + d.mutex.Lock() + defer d.mutex.Unlock() + + s, ok := d.streams[sid] + if !ok { + return nil + } + + return s.chSignal +} diff --git a/ragep2p/ragep2pnew/internal/frame/frame.go b/ragep2p/ragep2pnew/internal/frame/frame.go new file mode 100644 index 00000000..730c1dad --- /dev/null +++ b/ragep2p/ragep2pnew/internal/frame/frame.go @@ -0,0 +1,321 @@ +package frame + +import ( + "encoding/binary" + "fmt" + + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +const ( + streamIDSize = internaltypes.StreamIDSize + requestIDSize = internaltypes.RequestIDSize +) + +var errMaxMessageSizeExceeded = fmt.Errorf("frame header error: the message size must not exceed %v bytes", types.MaxMessageLength) +var errOpenStreamPayloadSizeExceeded = fmt.Errorf( + "frame header error: the payload size specified in a 'OpenStream' frame header must be at most %v bytes", + types.MaxStreamNameLength, +) +var errCloseStreamPayloadSizeInvalid = fmt.Errorf("frame header error: the payload size specified in a 'CloseStream' frame header must be 0") +var errFrameHeaderSizeInvalid = fmt.Errorf("frame header error: invalid header length") +var errInvalidFrameType = fmt.Errorf("frame header error: invalid frame type") + +type FrameType byte + +const ( + _ FrameType = iota + 
FrameTypeOpenStream + FrameTypeCloseStream + FrameTypeMessagePlain + FrameTypeMessageRequest + FrameTypeMessageResponse +) + +//go-sumtype:decl FrameHeader + +type FrameHeader interface { + isFrameHeader() + + GetType() FrameType + GetPayloadSize() int + GetStreamID() internaltypes.StreamID + + Encode() []byte +} + +const ( + baseFrameHeaderSize = 1 + 32 + 4 + openStreamFrameHeaderSize = baseFrameHeaderSize + closeStreamFrameHeaderSize = baseFrameHeaderSize + MaxControlFrameHeaderSize = baseFrameHeaderSize // maximum size of a control frame (open/close) header + messagePlainFrameHeaderSize = baseFrameHeaderSize + messageRequestFrameHeaderSize = baseFrameHeaderSize + requestIDSize + messageResponseFrameHeaderSize = baseFrameHeaderSize + requestIDSize + MaxFrameHeaderSize = baseFrameHeaderSize + requestIDSize // whatever value is the largest header size from above +) + +var frameHeaderSizes = map[FrameType]int{ + FrameTypeOpenStream: openStreamFrameHeaderSize, + FrameTypeCloseStream: closeStreamFrameHeaderSize, + FrameTypeMessagePlain: messagePlainFrameHeaderSize, + FrameTypeMessageRequest: messageRequestFrameHeaderSize, + FrameTypeMessageResponse: messageResponseFrameHeaderSize, +} + +// The different frame header types must be wire-compatible with the previously used frame header structure. +// See encodeBaseFrameHeader(...) and decodeBaseFrameHeader(). 
+// +// type frameHeader struct { +// Type frameType +// StreamID streamID +// PayloadSize uint32 +// } + +type OpenStreamFrameHeader struct { + StreamID internaltypes.StreamID + PayloadSize int +} + +type CloseStreamFrameHeader struct { + StreamID internaltypes.StreamID +} + +type MessagePlainFrameHeader struct { + StreamID internaltypes.StreamID + PayloadSize int +} + +type MessageRequestFrameHeader struct { + StreamID internaltypes.StreamID + PayloadSize int + RequestID internaltypes.RequestID +} + +type MessageResponseFrameHeader struct { + StreamID internaltypes.StreamID + PayloadSize int + RequestID internaltypes.RequestID +} + +func (OpenStreamFrameHeader) isFrameHeader() {} +func (CloseStreamFrameHeader) isFrameHeader() {} +func (MessagePlainFrameHeader) isFrameHeader() {} +func (MessageRequestFrameHeader) isFrameHeader() {} +func (MessageResponseFrameHeader) isFrameHeader() {} + +func (OpenStreamFrameHeader) GetType() FrameType { + return FrameTypeOpenStream +} +func (CloseStreamFrameHeader) GetType() FrameType { + return FrameTypeCloseStream +} +func (MessagePlainFrameHeader) GetType() FrameType { + return FrameTypeMessagePlain +} +func (MessageRequestFrameHeader) GetType() FrameType { + return FrameTypeMessageRequest +} +func (MessageResponseFrameHeader) GetType() FrameType { + return FrameTypeMessageResponse +} + +func (h OpenStreamFrameHeader) GetPayloadSize() int { + return h.PayloadSize +} +func (h CloseStreamFrameHeader) GetPayloadSize() int { + return 0 +} +func (h MessagePlainFrameHeader) GetPayloadSize() int { + return h.PayloadSize +} +func (h MessageRequestFrameHeader) GetPayloadSize() int { + return h.PayloadSize +} +func (h MessageResponseFrameHeader) GetPayloadSize() int { + return h.PayloadSize +} + +func (h OpenStreamFrameHeader) GetStreamID() internaltypes.StreamID { + return h.StreamID +} +func (h CloseStreamFrameHeader) GetStreamID() internaltypes.StreamID { + return h.StreamID +} +func (h MessagePlainFrameHeader) GetStreamID() 
internaltypes.StreamID { + return h.StreamID +} +func (h MessageRequestFrameHeader) GetStreamID() internaltypes.StreamID { + return h.StreamID +} +func (h MessageResponseFrameHeader) GetStreamID() internaltypes.StreamID { + return h.StreamID +} + +func encodeBaseFrameHeader(frameType FrameType, streamID internaltypes.StreamID, payloadSize int, extraBufferCapacity int) []byte { + buffer := make([]byte, 0, baseFrameHeaderSize+extraBufferCapacity) + buffer = append(buffer, byte(frameType)) + buffer = append(buffer, streamID[:]...) + buffer = binary.BigEndian.AppendUint32(buffer, uint32(payloadSize)) + return buffer +} + +func decodeBaseFrameHeader(encoded []byte, expectedType FrameType, expectedSize int) (internaltypes.StreamID, int, error) { + var streamID internaltypes.StreamID + + if len(encoded) != expectedSize { + return internaltypes.StreamID{}, 0, errFrameHeaderSizeInvalid + } + if FrameType(encoded[0]) != expectedType { + return internaltypes.StreamID{}, 0, errInvalidFrameType + } + + payloadSize := binary.BigEndian.Uint32(encoded[1+streamIDSize:]) + if payloadSize > types.MaxMessageLength { + return internaltypes.StreamID{}, 0, errMaxMessageSizeExceeded + } + + copy(streamID[:], encoded[1:streamIDSize+1]) + + return streamID, int(payloadSize), nil +} + +func (h OpenStreamFrameHeader) Encode() []byte { + return encodeBaseFrameHeader(FrameTypeOpenStream, h.StreamID, h.PayloadSize, 0) +} + +func (h CloseStreamFrameHeader) Encode() []byte { + return encodeBaseFrameHeader(FrameTypeCloseStream, h.StreamID, 0, 0) +} + +func (h MessagePlainFrameHeader) Encode() []byte { + return encodeBaseFrameHeader(FrameTypeMessagePlain, h.StreamID, h.PayloadSize, 0) +} + +func (h MessageRequestFrameHeader) Encode() []byte { + buffer := encodeBaseFrameHeader(FrameTypeMessageRequest, h.StreamID, h.PayloadSize, requestIDSize) + buffer = append(buffer, h.RequestID[:]...) 
+ return buffer +} + +func (h MessageResponseFrameHeader) Encode() []byte { + buffer := encodeBaseFrameHeader(FrameTypeMessageResponse, h.StreamID, h.PayloadSize, requestIDSize) + buffer = append(buffer, h.RequestID[:]...) + return buffer +} + +func decodeFrameHeader(encoded []byte) (FrameHeader, error) { + if len(encoded) == 0 { + return nil, errFrameHeaderSizeInvalid + } + + switch FrameType(encoded[0]) { + case FrameTypeOpenStream: + return decodeOpenStreamFrameHeader(encoded) + case FrameTypeCloseStream: + return decodeCloseStreamFrameHeader(encoded) + case FrameTypeMessagePlain: + return decodeMessagePlainFrameHeader(encoded) + case FrameTypeMessageRequest: + return decodeMessageRequestFrameHeader(encoded) + case FrameTypeMessageResponse: + return decodeMessageResponseFrameHeader(encoded) + default: + return nil, errInvalidFrameType + } +} + +func decodeOpenStreamFrameHeader(encoded []byte) (OpenStreamFrameHeader, error) { + streamID, payloadSize, err := decodeBaseFrameHeader(encoded, FrameTypeOpenStream, openStreamFrameHeaderSize) + if err != nil { + return OpenStreamFrameHeader{}, err + } + if payloadSize > types.MaxStreamNameLength { + return OpenStreamFrameHeader{}, errOpenStreamPayloadSizeExceeded + } + + return OpenStreamFrameHeader{streamID, payloadSize}, nil +} + +func decodeCloseStreamFrameHeader(encoded []byte) (CloseStreamFrameHeader, error) { + streamID, payloadSize, err := decodeBaseFrameHeader(encoded, FrameTypeCloseStream, closeStreamFrameHeaderSize) + if err != nil { + return CloseStreamFrameHeader{}, err + } + if payloadSize != 0 { + return CloseStreamFrameHeader{}, errCloseStreamPayloadSizeInvalid + } + + return CloseStreamFrameHeader{streamID}, nil +} + +func decodeMessagePlainFrameHeader(encoded []byte) (MessagePlainFrameHeader, error) { + streamID, payloadSize, err := decodeBaseFrameHeader(encoded, FrameTypeMessagePlain, messagePlainFrameHeaderSize) + if err != nil { + return MessagePlainFrameHeader{}, err + } + return 
MessagePlainFrameHeader{streamID, payloadSize}, err +} + +func decodeMessageRequestFrameHeader(encoded []byte) (MessageRequestFrameHeader, error) { + var requestID internaltypes.RequestID + streamID, payloadSize, err := decodeBaseFrameHeader(encoded, FrameTypeMessageRequest, messageRequestFrameHeaderSize) + if err != nil { + return MessageRequestFrameHeader{}, err + } + + copy(requestID[:], encoded[baseFrameHeaderSize:]) + return MessageRequestFrameHeader{streamID, payloadSize, requestID}, nil +} + +func decodeMessageResponseFrameHeader(encoded []byte) (MessageResponseFrameHeader, error) { + var requestID internaltypes.RequestID + streamID, payloadSize, err := decodeBaseFrameHeader(encoded, FrameTypeMessageResponse, messageResponseFrameHeaderSize) + if err != nil { + return MessageResponseFrameHeader{}, err + } + + copy(requestID[:], encoded[baseFrameHeaderSize:]) + return MessageResponseFrameHeader{streamID, payloadSize, requestID}, nil +} + +var ErrReadFrameHeaderReadFailed = fmt.Errorf("failed to read frame header") + +type FrameHeaderReader struct { + readFn func(buf []byte) bool + buf []byte +} + +// readFn must return false if it wasn't able to completely fill the buffer. +// readFn is assumed to be stateful and remember what was read already. +func MakeFrameHeaderReader(readFn func(buf []byte) bool) FrameHeaderReader { + return FrameHeaderReader{readFn, make([]byte, MaxFrameHeaderSize)} +} + +func (r *FrameHeaderReader) ReadFrameHeader() (header FrameHeader, err error) { + // Read frame type. + if !r.readFn(r.buf[:1]) { + return nil, ErrReadFrameHeaderReadFailed + } + + // Get the length of the frame header for the given type. Abort if the type is invalid. + frameType := FrameType(r.buf[0]) + headerSize, ok := frameHeaderSizes[frameType] + if !ok { + return nil, fmt.Errorf("invalid frame type: %d", frameType) + } + + // Read the rest of the frame header. 
+ if !r.readFn(r.buf[1:headerSize]) { + return nil, ErrReadFrameHeaderReadFailed + } + + // Decode the frame header. + header, err = decodeFrameHeader(r.buf[:headerSize]) + if err != nil { + return nil, fmt.Errorf("%w caused by raw frame header %x", err, r.buf[:headerSize]) + } + + return header, nil +} diff --git a/ragep2p/ragep2pnew/internal/internaltypes/internaltypes.go b/ragep2p/ragep2pnew/internal/internaltypes/internaltypes.go new file mode 100644 index 00000000..3cc77c8a --- /dev/null +++ b/ragep2p/ragep2pnew/internal/internaltypes/internaltypes.go @@ -0,0 +1,54 @@ +package internaltypes + +import ( + "bytes" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "fmt" + + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +const ( + StreamIDSize = 32 + RequestIDSize = 32 +) + +type StreamID [StreamIDSize]byte +type RequestID [RequestIDSize]byte + +var _ fmt.Stringer = StreamID{} +var _ fmt.Stringer = RequestID{} + +func (s StreamID) String() string { + return hex.EncodeToString(s[:]) +} + +func (r RequestID) String() string { + return hex.EncodeToString(r[:]) +} + +func MakeStreamID(self types.PeerID, other types.PeerID, name string) StreamID { + if bytes.Compare(self[:], other[:]) < 0 { + return MakeStreamID(other, self, name) + } + + h := sha256.New() + _, _ = h.Write(self[:]) + _, _ = h.Write(other[:]) + // this is fine because self and other are of constant length. if more than + // one variable length item is ever added here, we should also hash lengths + // to prevent collisions. 
+ _, _ = h.Write([]byte(name)) + + var result StreamID + _ = h.Sum(result[:0]) + return result +} + +func MakeRandomRequestID() (RequestID, error) { + requestID := RequestID{} + _, err := rand.Read(requestID[:]) + return requestID, err +} diff --git a/ragep2p/ragep2pnew/internal/muxer/muxer.go b/ragep2p/ragep2pnew/internal/muxer/muxer.go new file mode 100644 index 00000000..612cfc2d --- /dev/null +++ b/ragep2p/ragep2pnew/internal/muxer/muxer.go @@ -0,0 +1,272 @@ +package muxer + +import ( + "fmt" + "sync" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/internal/randmap" + "github.com/smartcontractkit/libocr/internal/ringbuffer" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/stream2types" +) + +const invertPrioritiesEvery = 8 + +type streamRecord struct { + streamName string + priority stream2types.StreamPriority + enabled bool + messageBuffer *ringbuffer.RingBuffer[stream2types.OutboundBinaryMessage] +} + +type Muxer struct { + logger loghelper.LoggerWithContext + + chSignal chan struct{} + + mutex sync.Mutex + streamRecords map[internaltypes.StreamID]*streamRecord + defaultPriorityStreamsWithPendingMessage *randmap.Map[internaltypes.StreamID, *ringbuffer.RingBuffer[stream2types.OutboundBinaryMessage]] + lowPriorityStreamsWithPendingMessage *randmap.Map[internaltypes.StreamID, *ringbuffer.RingBuffer[stream2types.OutboundBinaryMessage]] + popCount uint +} + +func NewMuxer(logger loghelper.LoggerWithContext) *Muxer { + return &Muxer{ + logger, + + make(chan struct{}, 1), + sync.Mutex{}, + map[internaltypes.StreamID]*streamRecord{}, + randmap.NewMap[internaltypes.StreamID, *ringbuffer.RingBuffer[stream2types.OutboundBinaryMessage]](), + randmap.NewMap[internaltypes.StreamID, *ringbuffer.RingBuffer[stream2types.OutboundBinaryMessage]](), + + 0, + } +} + +// Adds a stream 
to the Muxer. The stream is initially disabled. +// +// panics if maxOutgoingBufferedMessages is 0 or less +func (mux *Muxer) AddStream( + sid internaltypes.StreamID, + streamName string, + priority stream2types.StreamPriority, + maxOutgoingBufferedMessages int, +) bool { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + if _, exists := mux.streamRecords[sid]; exists { + return false + } + + mux.streamRecords[sid] = &streamRecord{ + streamName, + priority, + false, + ringbuffer.NewRingBuffer[stream2types.OutboundBinaryMessage](maxOutgoingBufferedMessages), + } + + mux.logger.Debug("Muxer: stream added", commontypes.LogFields{ + "sid": sid, + "streamName": streamName, + "priority": priority, + "maxOutgoingBufferedMessages": maxOutgoingBufferedMessages, + }) + return true +} + +// panics if maxOutgoingBufferedMessages is 0 or less +func (mux *Muxer) UpdateStream(sid internaltypes.StreamID, maxOutgoingBufferedMessages int) bool { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + streamRecord, ok := mux.streamRecords[sid] + if !ok { + return false + } + + streamRecord.messageBuffer.SetCap(maxOutgoingBufferedMessages) + return true +} + +func (mux *Muxer) RemoveStream(sid internaltypes.StreamID) bool { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + streamRecord, ok := mux.streamRecords[sid] + if !ok { + return false + } + + streamsWithPendingMessage := mux.streamsWithPendingMessageForPriority(streamRecord.priority) + streamsWithPendingMessage.Delete(sid) + + delete(mux.streamRecords, sid) + + mux.logger.Debug("Muxer: stream removed", commontypes.LogFields{ + "sid": sid, + }) + return true +} + +// Enables a stream to emit messages via Pop(). 
+func (mux *Muxer) EnableStream(sid internaltypes.StreamID) bool { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + streamRecord, ok := mux.streamRecords[sid] + if !ok { + return false + } + + streamRecord.enabled = true + + if streamRecord.messageBuffer.IsEmpty() { + return true + } + + streamsWithPendingMessage := mux.streamsWithPendingMessageForPriority(streamRecord.priority) + streamsWithPendingMessage.Set(sid, streamRecord.messageBuffer) + + select { + case mux.chSignal <- struct{}{}: + default: + } + + mux.logger.Debug("Muxer: stream enabled", commontypes.LogFields{ + "sid": sid, + }) + return true +} + +// Disables a stream from emitting messages via Pop(). Messages can +// still be pushed to the stream's buffer via PushEvict() and will +// be emitted once the stream is enabled again. +func (mux *Muxer) DisableStream(sid internaltypes.StreamID) bool { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + streamRecord, ok := mux.streamRecords[sid] + if !ok { + return false + } + + streamRecord.enabled = false + + streamsWithPendingMessage := mux.streamsWithPendingMessageForPriority(streamRecord.priority) + streamsWithPendingMessage.Delete(sid) + + mux.logger.Debug("Muxer: stream disabled", commontypes.LogFields{ + "sid": sid, + }) + return true +} + +// Pushes a message to the stream's ring buffer of messages and evicts the oldest message if the buffer is full. +func (mux *Muxer) PushEvict(sid internaltypes.StreamID, m stream2types.OutboundBinaryMessage) bool { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + streamRecord, ok := mux.streamRecords[sid] + if !ok { + return false + } + + streamRecord.messageBuffer.PushEvict(m) + + if streamRecord.enabled { + streamsWithPendingMessage := mux.streamsWithPendingMessageForPriority(streamRecord.priority) + streamsWithPendingMessage.Set(sid, streamRecord.messageBuffer) + } + + select { + case mux.chSignal <- struct{}{}: + default: + } + + return true +} + +// Signals that there are (maybe) messages available via Pop(). 
Note that unlike +// its counterpart in the demuxer, Pop() will not raise the flag again. A consumer +// of this signal is assumed to keep Pop()-ing until there are no more messages. +func (mux *Muxer) SignalMaybePending() <-chan struct{} { + return mux.chSignal +} + +func (mux *Muxer) streamsWithPendingMessageForPriority(priority stream2types.StreamPriority) *randmap.Map[internaltypes.StreamID, *ringbuffer.RingBuffer[stream2types.OutboundBinaryMessage]] { + switch priority { + case stream2types.StreamPriorityDefault: + return mux.defaultPriorityStreamsWithPendingMessage + case stream2types.StreamPriorityLow: + return mux.lowPriorityStreamsWithPendingMessage + } + panic(fmt.Sprintf("unexpected stream priority: %#v", priority)) +} + +func (mux *Muxer) popForPriority(priority stream2types.StreamPriority) (stream2types.OutboundBinaryMessage, internaltypes.StreamID) { + streamsWithPendingMessage := mux.streamsWithPendingMessageForPriority(priority) + + if streamsWithPendingMessage.Size() == 0 { + return nil, internaltypes.StreamID{} + } + + entry, ok := streamsWithPendingMessage.GetRandom() + if !ok { + return nil, internaltypes.StreamID{} + } + sid, messageBuffer := entry.Key, entry.Value + + msg, ok := messageBuffer.Pop() + if !ok { + mux.logger.Error("Muxer: message buffer is unexpectedly empty, this points to a bug", commontypes.LogFields{ + "sid": sid, + }) + return nil, internaltypes.StreamID{} + } + + if messageBuffer.IsEmpty() { + streamsWithPendingMessage.Delete(sid) + } + + return msg, sid +} + +// If any stream is enabled and has messages buffered, one of these will be returned. +// The policy for choosing which message to return is an implementation detail and may change. +// For now, we implement a simple policy that prefers DefaultPriority messages most +// of the time, and LowPriority messages once in a while. If there are multiple enabled streams +// of equal priority with buffered messages, we choose one uniformly at random. 
+func (mux *Muxer) Pop() (stream2types.OutboundBinaryMessage, internaltypes.StreamID) { + mux.mutex.Lock() + defer mux.mutex.Unlock() + + // Overflow is harmless + mux.popCount++ + + var highPriority, lowPriority stream2types.StreamPriority + if mux.popCount%invertPrioritiesEvery == 0 { + highPriority = stream2types.StreamPriorityLow + lowPriority = stream2types.StreamPriorityDefault + } else { + highPriority = stream2types.StreamPriorityDefault + lowPriority = stream2types.StreamPriorityLow + } + + msg, sid := mux.popForPriority(highPriority) + if msg != nil { + return msg, sid + } + + msg, sid = mux.popForPriority(lowPriority) + if msg != nil { + return msg, sid + } + + mux.logger.Info("Muxer: nothing to pop", commontypes.LogFields{}) + return nil, internaltypes.StreamID{} +} diff --git a/ragep2p/ragep2pnew/internal/overheadawareconn/overhead_aware_conn.go b/ragep2p/ragep2pnew/internal/overheadawareconn/overhead_aware_conn.go new file mode 100644 index 00000000..91d147cc --- /dev/null +++ b/ragep2p/ragep2pnew/internal/overheadawareconn/overhead_aware_conn.go @@ -0,0 +1,111 @@ +package overheadawareconn + +import ( + "fmt" + "net" + + "github.com/prometheus/client_golang/prometheus" +) + +// OverheadAwareConn keeps track of TLS overhead (vs application data consumed by ragep2p) +// and enables us to kill the connection is the overhead is suspiciously high. +// This is to defend against an adversary causing undue overhead by e.g. fragmenting their TLS +// records into tiny pieces and other similar shenanigans. +// +// Note that the overhead computation is specific to ragep2p's use of TLS. This is not useful +// for other applications. +// +// Not thread-safe, except for functions exposed directly from the embedded net.Conn +// such as Close() and SetDeadline(). 
+type OverheadAwareConn struct { + net.Conn + readBytesTotal prometheus.Counter + writtenBytesTotal prometheus.Counter + setupComplete bool + readPostSetupRawBytes int64 // raw = including TLS record & encrypption overhead + deliveredApplicationDataBytes int64 +} + +var _ net.Conn = (*OverheadAwareConn)(nil) + +func NewOverheadAwareConn( + conn net.Conn, + readBytesTotal prometheus.Counter, + writtenBytesTotal prometheus.Counter, +) *OverheadAwareConn { + return &OverheadAwareConn{ + conn, + readBytesTotal, + writtenBytesTotal, + + false, + 0, + 0, + } +} + +func (r *OverheadAwareConn) SetupComplete() { + r.setupComplete = true +} + +func (r *OverheadAwareConn) Read(b []byte) (n int, err error) { + n, err = r.Conn.Read(b) + r.readBytesTotal.Add(float64(n)) + + if r.setupComplete { + r.readPostSetupRawBytes += int64(n) + } + + return n, err +} + +func (r *OverheadAwareConn) Write(b []byte) (n int, err error) { + n, err = r.Conn.Write(b) + r.writtenBytesTotal.Add(float64(n)) + return n, err +} + +// Whenever ragep2p reads a message, that message starts with a frame header. +// The frame header's size is at least 37 bytes. It will always fit into a single TLS record. +// The per-record overhead of TLS 1.3 in our configuration is 22 bytes. +// So even for a worst-case scenario of the user only sending empty ragep2p messages, +// we should only have a factor of (37+22)/37 ≈ 1.6. We conservatively round up to 2. +// +// Intuitively, it is obvious that the overhead will go down for larger message, but let's make +// this a bit more concrete: +// - If frame header size + payload size is less than 2048 bytes, ragep2p will coalesce the write +// into a single TLS record, giving us a factor of (2048+22)/2048 ≈ 1.01 +// - Otherwise, ragep2p will send separate records for the frame header and the payload, giving us +// a factor of (2049 + 2*22)/2049 ≈ 1.02 or smaller. +// - Once messages reach the maximum TLS record size of 16348 bytes, they need to be fragmented. 
+// Again, application data is so large at this point that the record overhead is negligible: +// (37 + 16349 + 3*22)/(37 + 16349) ≈ 1.005 +const MaximumAllowedApplicationDataToRawFactor = 2 + +// Golang's TLS implementation performs some read-ahead buffering, e.g. when buffering an incomplete +// record. (Decryption can only take place once the whole record has been read.) +// Let's *generously* overestimate how much. +const GenerouslyOverstimatedTLSReadAhead = 100 * 1024 + +// AddDeliveredApplicationDataBytes tells the OverheadAwareConn how many bytes of application data +// have been delivered to the user and returns an error if it seems that the overhead +// on the underlying net.Conn is too large. +func (r *OverheadAwareConn) AddDeliveredApplicationDataBytes(bytes int) error { + if bytes < 0 { + return fmt.Errorf("bytes must be non-negative") + } + + r.deliveredApplicationDataBytes += int64(bytes) + + readPostSetupRawBytesAttributableToDeliveredApplicationData := r.readPostSetupRawBytes - GenerouslyOverstimatedTLSReadAhead + + if r.deliveredApplicationDataBytes*2 < readPostSetupRawBytesAttributableToDeliveredApplicationData { + return fmt.Errorf("inbound read overhead on underlying TCP connection is too large, suspecting shenanigans: deliveredApplicationDataBytes=%d, readPostSetupRawBytes=%d, generouslyOverstimatedTLSReadAhead=%d", + r.deliveredApplicationDataBytes, + r.readPostSetupRawBytes, + GenerouslyOverstimatedTLSReadAhead, + ) + } + + return nil +} diff --git a/ragep2p/ragep2pnew/internal/ratelimit/ratelimit.go b/ragep2p/ragep2pnew/internal/ratelimit/ratelimit.go new file mode 100644 index 00000000..31d21db8 --- /dev/null +++ b/ragep2p/ragep2pnew/internal/ratelimit/ratelimit.go @@ -0,0 +1,115 @@ +package ratelimit + +import ( + "math" + "time" +) + +type MillitokensPerSecond uint64 + +// A token bucket rate limiter. Can store at most UINT32_MAX tokens before it +// saturates. +// The zero TokenBucket{} is a valid value. 
+// +// NOT thread-safe +type TokenBucket struct { + rate MillitokensPerSecond + capacity uint32 + + nanotokens uint64 + updated time.Time +} + +func NewTokenBucket(rate MillitokensPerSecond, capacity uint32, full bool) *TokenBucket { + tb := &TokenBucket{} + tb.SetRate(rate) + tb.SetCapacity(capacity) + if full { + tb.AddTokens(capacity) + } + return tb +} + +func (tb *TokenBucket) update(now time.Time) { + if now.Before(tb.updated) { // we assume that time moves forward monotonically + now = tb.updated + } + + // We round down the time difference to the nearest microsecond + // and store the remainder in the updated timestamp + timeDiff := now.Sub(tb.updated) + timeDiffMicroseconds := uint64(timeDiff / time.Microsecond) + timeDiffRemainder := timeDiff % time.Microsecond + tb.updated = now.Add(-timeDiffRemainder) + + // millitokens per second x microseconds yields nanotokens + nanotokensDiff := timeDiffMicroseconds * uint64(tb.rate) + if timeDiffMicroseconds != 0 && nanotokensDiff/uint64(timeDiffMicroseconds) != uint64(tb.rate) { + // multiplication overflow + tb.nanotokens = math.MaxUint64 + } else { + newNanotokens := tb.nanotokens + nanotokensDiff + if newNanotokens < tb.nanotokens { + // addition overflow + tb.nanotokens = math.MaxUint64 + } else { + tb.nanotokens = newNanotokens + } + } + + capacityNanotokens := uint64(tb.capacity) * 1_000_000_000 + if tb.nanotokens > capacityNanotokens { + tb.nanotokens = capacityNanotokens + } +} + +// Adds n tokens to the bucket. 
+func (tb *TokenBucket) AddTokens(n uint32) { + newNanotokens := tb.nanotokens + uint64(n)*1_000_000_000 + if newNanotokens < tb.nanotokens { + // addition overflow + tb.nanotokens = math.MaxUint64 + } else { + tb.nanotokens = newNanotokens + } +} + +func (tb *TokenBucket) removeTokens(now time.Time, n uint32) bool { + tb.update(now) + if tb.nanotokens >= uint64(n)*1_000_000_000 { + tb.nanotokens -= uint64(n) * 1_000_000_000 + return true + } else { + tb.nanotokens = 0 + return false + } +} + +// Removes n tokens from the bucket. If the bucket contained at least n tokens, +// return true. Otherwise, returns false and sets bucket to contain zero tokens. +func (tb *TokenBucket) RemoveTokens(n uint32) bool { + return tb.removeTokens(time.Now(), n) +} + +func (tb *TokenBucket) setRate(now time.Time, rate MillitokensPerSecond) { + tb.update(now) + tb.rate = rate +} + +// Sets the rate at which the bucket fills with tokens. +func (tb *TokenBucket) SetRate(rate MillitokensPerSecond) { + tb.setRate(time.Now(), rate) +} + +func (tb *TokenBucket) Rate() MillitokensPerSecond { + return tb.rate +} + +// Sets the bucket's capacity. +func (tb *TokenBucket) SetCapacity(capacity uint32) { + tb.capacity = capacity +} + +func (tb *TokenBucket) Capacity() uint32 { + return tb.capacity +} diff --git a/ragep2p/ragep2pnew/internal/ratelimitaggregator/aggregator.go b/ragep2p/ragep2pnew/internal/ratelimitaggregator/aggregator.go new file mode 100644 index 00000000..dc1b6bad --- /dev/null +++ b/ragep2p/ragep2pnew/internal/ratelimitaggregator/aggregator.go @@ -0,0 +1,94 @@ +package ratelimitaggregator + +import ( + "sync" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +// Aggregator aggregates the rate limits of all streams of a peer for o11y +// and sanity checking purposes. 
+type Aggregator struct { + logger loghelper.LoggerWithContext + + mutex sync.Mutex + messagesTokenBucketAggregate TokenBucketAggregate + bytesTokenBucketAggregate TokenBucketAggregate +} + +type TokenBucketAggregate struct { + Rate float64 + Capacity float64 +} + +func NewAggregator(logger loghelper.LoggerWithContext) *Aggregator { + return &Aggregator{ + logger, + + sync.Mutex{}, + TokenBucketAggregate{}, + TokenBucketAggregate{}, + } +} + +// These are so large that nobody sane should hit them in practice. +const ( + warnThresholdMessagesRate = 10_000 + warnThresholdMessagesCapacity = 5 * warnThresholdMessagesRate + warnThresholdBytesRate = 1_024 * 1_024 * 1_024 + warnThresholdBytesCapacity = 5 * warnThresholdBytesRate +) + +func (crl *Aggregator) AddStream(messagesLimit types.TokenBucketParams, bytesLimit types.TokenBucketParams) { + crl.mutex.Lock() + defer crl.mutex.Unlock() + + crl.messagesTokenBucketAggregate.Rate += messagesLimit.Rate + crl.messagesTokenBucketAggregate.Capacity += float64(messagesLimit.Capacity) + crl.bytesTokenBucketAggregate.Rate += bytesLimit.Rate + crl.bytesTokenBucketAggregate.Capacity += float64(bytesLimit.Capacity) + + if crl.messagesTokenBucketAggregate.Rate > warnThresholdMessagesRate { + crl.logger.Warn("aggregate messages rate exceeds warning threshold, this likely points to buggy configuration of some ragep2p Stream", commontypes.LogFields{ + "messagesRate": crl.messagesTokenBucketAggregate.Rate, + "threshold": warnThresholdMessagesRate, + }) + } + if crl.messagesTokenBucketAggregate.Capacity > warnThresholdMessagesCapacity { + crl.logger.Warn("aggregate messages capacity exceeds warning threshold, this likely points to buggy configuration of some ragep2p Stream", commontypes.LogFields{ + "messagesCapacity": crl.messagesTokenBucketAggregate.Capacity, + "threshold": warnThresholdMessagesCapacity, + }) + } + if crl.bytesTokenBucketAggregate.Rate > warnThresholdBytesRate { + crl.logger.Warn("aggregate bytes rate exceeds warning 
threshold, this likely points to buggy configuration of some ragep2p Stream", commontypes.LogFields{ + "bytesRate": crl.bytesTokenBucketAggregate.Rate, + "threshold": warnThresholdBytesRate, + }) + } + if crl.bytesTokenBucketAggregate.Capacity > warnThresholdBytesCapacity { + crl.logger.Warn("aggregate bytes capacity exceeds warning threshold, this likely points to buggy configuration of some ragep2p Stream", commontypes.LogFields{ + "bytesCapacity": crl.bytesTokenBucketAggregate.Capacity, + "threshold": warnThresholdBytesCapacity, + }) + } +} + +func (crl *Aggregator) RemoveStream(messagesLimit types.TokenBucketParams, bytesLimit types.TokenBucketParams) { + crl.mutex.Lock() + defer crl.mutex.Unlock() + + crl.messagesTokenBucketAggregate.Rate -= messagesLimit.Rate + crl.messagesTokenBucketAggregate.Capacity -= float64(messagesLimit.Capacity) + crl.bytesTokenBucketAggregate.Rate -= bytesLimit.Rate + crl.bytesTokenBucketAggregate.Capacity -= float64(bytesLimit.Capacity) +} + +func (crl *Aggregator) Aggregates() (messages TokenBucketAggregate, bytes TokenBucketAggregate) { + crl.mutex.Lock() + defer crl.mutex.Unlock() + + return crl.messagesTokenBucketAggregate, crl.bytesTokenBucketAggregate +} diff --git a/networking/internal/ocrendpointv3/responselimit/checker.go b/ragep2p/ragep2pnew/internal/responselimit/checker.go similarity index 88% rename from networking/internal/ocrendpointv3/responselimit/checker.go rename to ragep2p/ragep2pnew/internal/responselimit/checker.go index 9ece7032..b9967506 100644 --- a/networking/internal/ocrendpointv3/responselimit/checker.go +++ b/ragep2p/ragep2pnew/internal/responselimit/checker.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/smartcontractkit/libocr/networking/internal/ocrendpointv3/types" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" ) type ResponseCheckResult byte @@ -30,7 +30,7 @@ const ( type responseCheckerMapEntry struct { index int policy ResponsePolicy - streamID 
types.StreamID + streamID internaltypes.StreamID } // Data structure for keeping track of open requests until a set expiry date. @@ -42,16 +42,16 @@ type responseCheckerMapEntry struct { // SetPolicy(...) and CheckResponse(...) are O(1) operations. type ResponseChecker struct { mutex sync.Mutex - rids []types.RequestID - policies map[types.RequestID]responseCheckerMapEntry + rids []internaltypes.RequestID + policies map[internaltypes.RequestID]responseCheckerMapEntry rng *rand.Rand } func NewResponseChecker() *ResponseChecker { return &ResponseChecker{ sync.Mutex{}, - make([]types.RequestID, 0), - make(map[types.RequestID]responseCheckerMapEntry), + make([]internaltypes.RequestID, 0), + make(map[internaltypes.RequestID]responseCheckerMapEntry), rand.New(rand.NewSource(time.Now().UnixNano())), } } @@ -59,7 +59,7 @@ func NewResponseChecker() *ResponseChecker { // Sets the policy for a given (fresh) request ID. After setting the policy, calling Pop(...) for the same ID before the // policy expires returns the policy Set with this function. If a policy with the provided ID is already present, it // will be overwritten. -func (c *ResponseChecker) SetPolicy(sid types.StreamID, rid types.RequestID, policy ResponsePolicy) { +func (c *ResponseChecker) SetPolicy(sid internaltypes.StreamID, rid internaltypes.RequestID, policy ResponsePolicy) { c.mutex.Lock() defer c.mutex.Unlock() @@ -87,7 +87,7 @@ func (c *ResponseChecker) SetPolicy(sid types.StreamID, rid types.RequestID, pol // Lookup the policy for a given response and check if it should be allowed or rejected. // See responseCheckResult for additional documentation on the potential return values of this function. 
-func (c *ResponseChecker) CheckResponse(sid types.StreamID, rid types.RequestID, size int) ResponseCheckResult { +func (c *ResponseChecker) CheckResponse(sid internaltypes.StreamID, rid internaltypes.RequestID, size int) ResponseCheckResult { c.mutex.Lock() defer c.mutex.Unlock() @@ -118,7 +118,7 @@ func (c *ResponseChecker) CheckResponse(sid types.StreamID, rid types.RequestID, // Removes all currently tracked policies for the given stream ID. To ensure that responses sent to a stream cannot be // accepted after this stream is closed and reopened, this function is called when the Stream is closed (and removed // from the demuxer). -func (c *ResponseChecker) ClearPoliciesForStream(sid types.StreamID) { +func (c *ResponseChecker) ClearPoliciesForStream(sid internaltypes.StreamID) { c.mutex.Lock() defer c.mutex.Unlock() @@ -157,7 +157,7 @@ func (c *ResponseChecker) cleanupExpired() { } // Remove the policy for a given request ID from (1) the map of policies and (2) the list of request IDs. -func (c *ResponseChecker) removeEntry(id types.RequestID, index int) { +func (c *ResponseChecker) removeEntry(id internaltypes.RequestID, index int) { // Remove the entry from the map of polices. delete(c.policies, id) diff --git a/networking/internal/ocrendpointv3/responselimit/policies.go b/ragep2p/ragep2pnew/internal/responselimit/policies.go similarity index 88% rename from networking/internal/ocrendpointv3/responselimit/policies.go rename to ragep2p/ragep2pnew/internal/responselimit/policies.go index 6c5449e4..e6b6dd0f 100644 --- a/networking/internal/ocrendpointv3/responselimit/policies.go +++ b/ragep2p/ragep2pnew/internal/responselimit/policies.go @@ -3,9 +3,11 @@ package responselimit import ( "time" - "github.com/smartcontractkit/libocr/networking/internal/ocrendpointv3/types" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" ) +//go-sumtype:decl ResponsePolicy + // Interface for specifying rate-limit exceptions for responses. 
// // When a request is made, a response policy is used to specify if a response (or in principle multiple responses) @@ -21,7 +23,7 @@ type ResponsePolicy interface { // Specifies whether a response for the given request ID should be allowed or rejected. // Before and after checkResponse(...) is called internally, a policy is always checked for expiry. // checkResponse(...) is never called on an expired policy. - checkResponse(requestID types.RequestID, responseSize int, responseTimestamp time.Time) ResponseCheckResult + checkResponse(requestID internaltypes.RequestID, responseSize int, responseTimestamp time.Time) ResponseCheckResult } var _ ResponsePolicy = &SingleUseSizedLimitedResponsePolicy{} @@ -39,7 +41,7 @@ func (p *SingleUseSizedLimitedResponsePolicy) isPolicyExpired(timestamp time.Tim } func (p *SingleUseSizedLimitedResponsePolicy) checkResponse( - requestID types.RequestID, + requestID internaltypes.RequestID, responseSize int, responseTimestamp time.Time, ) ResponseCheckResult { diff --git a/ragep2p/ragep2pnew/internal/stream2types/stream2types.go b/ragep2p/ragep2pnew/internal/stream2types/stream2types.go new file mode 100644 index 00000000..558541e4 --- /dev/null +++ b/ragep2p/ragep2pnew/internal/stream2types/stream2types.go @@ -0,0 +1,129 @@ +package stream2types + +import ( + "fmt" + + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/responselimit" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +type StreamPriority byte + +const ( + _ StreamPriority = iota + StreamPriorityLow + StreamPriorityDefault +) + +type Stream2Limits struct { + MaxOutgoingBufferedMessages int // number of messages that fit in the outgoing buffer + MaxIncomingBufferedMessages int // number of messages that fit in the incoming buffer + MaxMessageLength int + MessagesLimit types.TokenBucketParams // rate limit for (the number of) incoming messages + BytesLimit 
types.TokenBucketParams // rate limit for (the accumulated size in bytes of) incoming messages +} + +func (limits Stream2Limits) Validate() (ValidatedStream2Limits, error) { + if !(limits.MaxOutgoingBufferedMessages > 0) { + return ValidatedStream2Limits{}, fmt.Errorf("maxOutgoingBufferedMessages %v is not positive", limits.MaxOutgoingBufferedMessages) + } + if !(limits.MaxIncomingBufferedMessages > 0) { + return ValidatedStream2Limits{}, fmt.Errorf("maxIncomingBufferedMessages %v is not positive", limits.MaxIncomingBufferedMessages) + } + if !(limits.MaxMessageLength <= types.MaxMessageLength) { + return ValidatedStream2Limits{}, fmt.Errorf("maxMessageLength %v is not less than or equal to global MaxMessageLength %v", limits.MaxMessageLength, types.MaxMessageLength) + } + if !(0 <= limits.MessagesLimit.Rate) { + return ValidatedStream2Limits{}, fmt.Errorf("messagesLimit.Rate %v is not non-negative", limits.MessagesLimit.Rate) + } + //lint:ignore SA4003 + if !(0 <= limits.MessagesLimit.Capacity) { //nolint:staticcheck + return ValidatedStream2Limits{}, fmt.Errorf("messagesLimit.Capacity %v is not non-negative", limits.MessagesLimit.Capacity) + } + if !(0 <= limits.BytesLimit.Rate) { + return ValidatedStream2Limits{}, fmt.Errorf("bytesLimit.Rate %v is not non-negative", limits.BytesLimit.Rate) + } + //lint:ignore SA4003 + if !(0 <= limits.BytesLimit.Capacity) { //nolint:staticcheck + return ValidatedStream2Limits{}, fmt.Errorf("bytesLimit.Capacity %v is not non-negative", limits.BytesLimit.Capacity) + } + return ValidatedStream2Limits{limits, struct{}{}}, nil +} + +type ValidatedStream2Limits struct { + Stream2Limits + private struct{} +} + +type RequestHandle internaltypes.RequestID + +func (r *RequestHandle) MakeResponse(payload []byte) OutboundBinaryMessageResponse { + return OutboundBinaryMessageResponse{ + internaltypes.RequestID(*r), + payload, + } +} + +//go-sumtype:decl InboundBinaryMessage + +type InboundBinaryMessage interface { + isInboundBinaryMessage() 
+} + +var _ InboundBinaryMessage = InboundBinaryMessagePlain{} +var _ InboundBinaryMessage = InboundBinaryMessageRequest{} +var _ InboundBinaryMessage = InboundBinaryMessageResponse{} + +type InboundBinaryMessagePlain struct { + Payload []byte +} + +func (InboundBinaryMessagePlain) isInboundBinaryMessage() {} + +type InboundBinaryMessageRequest struct { + RequestHandle RequestHandle + Payload []byte +} + +func (InboundBinaryMessageRequest) isInboundBinaryMessage() {} + +type InboundBinaryMessageResponse struct { + Payload []byte +} + +func (InboundBinaryMessageResponse) isInboundBinaryMessage() {} + +//go-sumtype:decl OutboundBinaryMessage + +type OutboundBinaryMessage interface { + isOutboundBinaryMessage() +} + +var _ OutboundBinaryMessage = OutboundBinaryMessagePlain{} +var _ OutboundBinaryMessage = OutboundBinaryMessageRequest{} +var _ OutboundBinaryMessage = OutboundBinaryMessageResponse{} + +type OutboundBinaryMessagePlain struct { + Payload []byte +} + +func (OutboundBinaryMessagePlain) isOutboundBinaryMessage() {} + +type OutboundBinaryMessageRequest struct { + ResponsePolicy responselimit.ResponsePolicy + Payload []byte +} + +func (OutboundBinaryMessageRequest) isOutboundBinaryMessage() {} + +type OutboundBinaryMessageResponse struct { + requestID internaltypes.RequestID + Payload []byte +} + +func (OutboundBinaryMessageResponse) isOutboundBinaryMessage() {} + +func RequestIDOfOutboundBinaryMessageResponse(m OutboundBinaryMessageResponse) internaltypes.RequestID { + return m.requestID +} diff --git a/ragep2p/ragep2pnew/loggers/logrus.go b/ragep2p/ragep2pnew/loggers/logrus.go new file mode 100644 index 00000000..eca8ae1c --- /dev/null +++ b/ragep2p/ragep2pnew/loggers/logrus.go @@ -0,0 +1,44 @@ +package loggers + +import ( + "github.com/sirupsen/logrus" + "github.com/smartcontractkit/libocr/commontypes" +) + +var _ commontypes.Logger = LogrusLogger{} + +type LogrusLogger struct { + logger *logrus.Logger +} + +func MakeLogrusLogger() LogrusLogger { + logger 
:= logrus.New() + logger.SetLevel(logrus.TraceLevel) + return LogrusLogger{ + logger, + } +} + +func (l LogrusLogger) Trace(msg string, fields commontypes.LogFields) { + l.logger.WithFields(logrus.Fields(fields)).Trace(msg) +} + +func (l LogrusLogger) Debug(msg string, fields commontypes.LogFields) { + l.logger.WithFields(logrus.Fields(fields)).Debug(msg) +} + +func (l LogrusLogger) Info(msg string, fields commontypes.LogFields) { + l.logger.WithFields(logrus.Fields(fields)).Info(msg) +} + +func (l LogrusLogger) Warn(msg string, fields commontypes.LogFields) { + l.logger.WithFields(logrus.Fields(fields)).Warn(msg) +} + +func (l LogrusLogger) Error(msg string, fields commontypes.LogFields) { + l.logger.WithFields(logrus.Fields(fields)).Error(msg) +} + +func (l LogrusLogger) Critical(msg string, fields commontypes.LogFields) { + l.logger.WithFields(logrus.Fields(fields)).Error("CRITICAL: " + msg) +} diff --git a/ragep2p/ragep2pnew/metrics.go b/ragep2p/ragep2pnew/metrics.go new file mode 100644 index 00000000..f3fad659 --- /dev/null +++ b/ragep2p/ragep2pnew/metrics.go @@ -0,0 +1,216 @@ +package ragep2pnew + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/metricshelper" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/ratelimitaggregator" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +type hostMetrics struct { + registerer prometheus.Registerer + inboundDialsTotal prometheus.Counter +} + +func newHostMetrics(registerer prometheus.Registerer, logger commontypes.Logger, self types.PeerID) *hostMetrics { + labels := map[string]string{"peer_id": self.String()} + + inboundDialsTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_host_inbound_dials_total", + Help: "The number of inbound dial attempts received by the host", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, 
inboundDialsTotal, "ragep2p_host_inbound_dials_total") + + return &hostMetrics{ + registerer, + inboundDialsTotal, + } +} + +func (m *hostMetrics) Close() { + m.registerer.Unregister(m.inboundDialsTotal) +} + +type peerMetrics struct { + registerer prometheus.Registerer + connEstablishedTotal prometheus.Counter + connEstablishedInboundTotal prometheus.Counter + connReadProcessedBytesTotal prometheus.Counter + connReadSkippedBytesTotal prometheus.Counter + connWrittenBytesTotal prometheus.Counter + rawconnReadBytesTotal prometheus.Counter + rawconnWrittenBytesTotal prometheus.Counter + rawconnRateLimitRate prometheus.Gauge + rawconnRateLimitCapacity prometheus.Gauge + messagesRateLimitRate prometheus.Gauge + messagesRateLimitCapacity prometheus.Gauge + bytesRateLimitRate prometheus.Gauge + bytesRateLimitCapacity prometheus.Gauge + messageBytes prometheus.Histogram +} + +func newPeerMetrics(registerer prometheus.Registerer, logger commontypes.Logger, self types.PeerID, other types.PeerID) *peerMetrics { + labels := map[string]string{"peer_id": self.String(), "remote_peer_id": other.String()} + + connEstablishedTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_conn_established_total", + Help: "The number of secure connections established with the remote peer. At most one connection can be active at any time.", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, connEstablishedTotal, "ragep2p_peer_conn_established_total") + + connEstablishedInboundTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_conn_established_inbound_total", + Help: "The number of secure connections established with the remote peer from inbound dials. 
At most one connection can be active at any time.", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, connEstablishedInboundTotal, "ragep2p_peer_conn_established_inbound_total") + + connReadProcessedBytesTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_conn_read_processed_bytes_total", + Help: "The number of bytes read on secure connections with the remote peer for processing", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, connReadProcessedBytesTotal, "ragep2p_peer_conn_read_processed_bytes_total") + + connReadSkippedBytesTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_conn_read_skipped_bytes_total", + Help: "The number of bytes read on secure connections with the remote peer that have been skipped, e.g. due to rate limits being exceeded", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, connReadSkippedBytesTotal, "ragep2p_peer_conn_read_skipped_bytes_total") + + connWrittenBytesTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_conn_written_bytes_total", + Help: "The number of bytes written on secure connections with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, connWrittenBytesTotal, "ragep2p_peer_conn_written_bytes_total") + + rawconnReadBytesTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_rawconn_read_bytes_total", + Help: "The number of raw bytes read on raw (post-knock, tcp) connections with the remote " + + "peer. Knocks are ~100 bytes and thus have negligible impact. 
This metric is useful for " + + "tracking overall bandwidth usage.", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, rawconnReadBytesTotal, "ragep2p_peer_rawconn_read_bytes_total") + + rawconnWrittenBytesTotal := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "ragep2p_peer_rawconn_written_bytes_total", + Help: "The number of raw bytes written on raw (post-knock, tcp) connections with the remote " + + "peer. Knocks are ~100 bytes and thus have negligible impact. This metric is useful for " + + "tracking overall bandwidth usage.", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, rawconnWrittenBytesTotal, "ragep2p_peer_rawconn_written_bytes_total") + + rawconnRateLimitRate := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ragep2p_peer_rawconn_rate_limit_rate", + Help: "[deprecated: use ragep2p_peer_bytes_rate_limit_rate instead] The refill rate in bytes per second for the token bucket rate limiting reads from raw connections with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, rawconnRateLimitRate, "ragep2p_peer_rawconn_rate_limit_rate") + + rawconnRateLimitCapacity := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ragep2p_peer_rawconn_rate_limit_capacity", + Help: "[deprecated: use ragep2p_peer_bytes_rate_limit_capacity instead] The capacity in bytes for the token bucket rate limiting reads from raw connections with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, rawconnRateLimitCapacity, "ragep2p_peer_rawconn_rate_limit_capacity") + + messagesRateLimitRate := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ragep2p_peer_messages_rate_limit_rate", + Help: "The aggregate refill rate in messages per second for the token bucket rate limiting reads from streams with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, 
messagesRateLimitRate, "ragep2p_peer_messages_rate_limit_rate") + + messagesRateLimitCapacity := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ragep2p_peer_messages_rate_limit_capacity", + Help: "The aggregate capacity in messages for the token bucket rate limiting reads from streams with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, messagesRateLimitCapacity, "ragep2p_peer_messages_rate_limit_capacity") + + bytesRateLimitRate := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ragep2p_peer_bytes_rate_limit_rate", + Help: "The aggregate refill rate in bytes per second for the token bucket rate limiting reads from streams with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, bytesRateLimitRate, "ragep2p_peer_bytes_rate_limit_rate") + + bytesRateLimitCapacity := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "ragep2p_peer_bytes_rate_limit_capacity", + Help: "The aggregate capacity in bytes for the token bucket rate limiting reads from streams with the remote peer", + ConstLabels: labels, + }) + + metricshelper.RegisterOrLogError(logger, registerer, bytesRateLimitCapacity, "ragep2p_peer_bytes_rate_limit_capacity") + + messageBytes := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "ragep2p_experimental_peer_message_bytes", + Help: "The size of messages sent to the remote peer", + ConstLabels: labels, + Buckets: []float64{1 << 8, 1 << 10, 1 << 12, 1 << 14, 1 << 16, 1 << 18, 1 << 20, 1 << 22, 1 << 24, 1 << 26, 1 << 28}, // 256 bytes, 1KiB, ..., 256MiB + }) + + metricshelper.RegisterOrLogError(logger, registerer, messageBytes, "ragep2p_experimental_peer_message_bytes") + + return &peerMetrics{ + registerer, + connEstablishedTotal, + connEstablishedInboundTotal, + connReadProcessedBytesTotal, + connReadSkippedBytesTotal, + connWrittenBytesTotal, + rawconnReadBytesTotal, + rawconnWrittenBytesTotal, + rawconnRateLimitRate, + 
rawconnRateLimitCapacity, + messagesRateLimitRate, + messagesRateLimitCapacity, + bytesRateLimitRate, + bytesRateLimitCapacity, + messageBytes, + } +} + +func (m *peerMetrics) Close() { + m.registerer.Unregister(m.connEstablishedTotal) + m.registerer.Unregister(m.connEstablishedInboundTotal) + m.registerer.Unregister(m.connReadProcessedBytesTotal) + m.registerer.Unregister(m.connReadSkippedBytesTotal) + m.registerer.Unregister(m.connWrittenBytesTotal) + m.registerer.Unregister(m.rawconnReadBytesTotal) + m.registerer.Unregister(m.rawconnWrittenBytesTotal) + m.registerer.Unregister(m.rawconnRateLimitRate) + m.registerer.Unregister(m.rawconnRateLimitCapacity) + m.registerer.Unregister(m.messagesRateLimitRate) + m.registerer.Unregister(m.messagesRateLimitCapacity) + m.registerer.Unregister(m.bytesRateLimitRate) + m.registerer.Unregister(m.bytesRateLimitCapacity) + m.registerer.Unregister(m.messageBytes) +} + +func (m *peerMetrics) SetRateLimits(messagesTokenBucketAggregate ratelimitaggregator.TokenBucketAggregate, bytesTokenBucketAggregate ratelimitaggregator.TokenBucketAggregate) { + m.messagesRateLimitRate.Set(messagesTokenBucketAggregate.Rate) + m.messagesRateLimitCapacity.Set(messagesTokenBucketAggregate.Capacity) + m.bytesRateLimitRate.Set(bytesTokenBucketAggregate.Rate) + m.bytesRateLimitCapacity.Set(bytesTokenBucketAggregate.Capacity) +} diff --git a/ragep2p/ragep2pnew/ragep2p.go b/ragep2p/ragep2pnew/ragep2p.go new file mode 100644 index 00000000..d38eab1c --- /dev/null +++ b/ragep2p/ragep2pnew/ragep2p.go @@ -0,0 +1,1599 @@ +package ragep2pnew + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "math/rand" + "net" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" + "github.com/smartcontractkit/libocr/ragep2p/internal/knock" + 
"github.com/smartcontractkit/libocr/ragep2p/internal/mtls" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/demuxer" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/frame" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/muxer" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/overheadawareconn" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/ratelimit" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/ratelimitaggregator" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/stream2types" + + "github.com/smartcontractkit/libocr/ragep2p/types" + "github.com/smartcontractkit/libocr/subprocesses" + + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" +) + +// Maximum number of streams with another peer that can be opened on a host +// Deprecated: use types.MaxStreamsPerPeer instead +const MaxStreamsPerPeer = types.MaxStreamsPerPeer + +// Maximum stream name length +// Deprecated: use types.MaxStreamNameLength instead +const MaxStreamNameLength = types.MaxStreamNameLength + +// Maximum length of messages sent with ragep2p +// Deprecated: use types.MaxMessageLength instead +const MaxMessageLength = types.MaxMessageLength + +// The 5 second value is cribbed from go standard library's tls package as of version 1.16 and later +// https://cs.opensource.google/go/go/+/master:src/crypto/tls/conn.go;drc=059a9eedf45f4909db6a24242c106be15fb27193;l=1454 +const netTimeout = 5 * time.Second + +type hostState uint8 + +const ( + _ hostState = iota + hostStatePending + hostStateOpen + hostStateClosed +) + +type peerStreamOpenRequest struct { + streamID internaltypes.StreamID + streamName string + streamPriority stream2types.StreamPriority + limits stream2types.ValidatedStream2Limits +} + +type peerStreamOpenResponse struct { + err error +} + +type peerStreamUpdateLimitsRequest struct { + streamID internaltypes.StreamID + limits 
stream2types.ValidatedStream2Limits +} + +type peerStreamUpdateLimitsResponse struct { + err error +} + +type peerStreamCloseRequest struct { + streamID internaltypes.StreamID +} + +type peerStreamCloseResponse struct { + peerHasNoStreams bool + err error +} + +type newConnNotification struct { + chConnTerminated <-chan struct{} +} + +type streamStateNotification struct { + streamID internaltypes.StreamID + streamName string // Used for sanity check, populated only on stream open and empty on stream close + open bool +} + +type peerConnLifeCycle struct { + connCancel context.CancelFunc + connSubs subprocesses.Subprocesses + chConnTerminated <-chan struct{} +} + +type peer struct { + chDone <-chan struct{} + + other types.PeerID + logger loghelper.LoggerWithContext + + metrics *peerMetrics + + incomingConnsLimiterMu sync.Mutex + incomingConnsLimiter *ratelimit.TokenBucket + + rateLimitAggregator *ratelimitaggregator.Aggregator + + connLifeCycleMu sync.Mutex + connLifeCycle peerConnLifeCycle + + mux *muxer.Muxer + demux *demuxer.Demuxer + + chNewConnNotification chan<- newConnNotification + + chOtherStreamStateNotification chan<- streamStateNotification + chSelfStreamStateNotification <-chan streamStateNotification + + chStreamOpenRequest chan<- peerStreamOpenRequest + chStreamOpenResponse <-chan peerStreamOpenResponse + + chStreamUpdateLimitsRequest chan<- peerStreamUpdateLimitsRequest + chStreamUpdateLimitsResponse <-chan peerStreamUpdateLimitsResponse + + chStreamCloseRequest chan<- peerStreamCloseRequest + chStreamCloseResponse <-chan peerStreamCloseResponse +} + +type HostConfig struct { + // DurationBetweenDials is the minimum duration between two dials. It is + // not the exact duration because of jitter. + DurationBetweenDials time.Duration +} + +// A Host allows users to establish Streams with other peers identified by their +// PeerID. 
The host will transparently handle peer discovery, secure connection +// (re)establishment, multiplexing streams over the connection and rate +// limiting. +type Host struct { + // Constructor args + config HostConfig + keyring types.PeerKeyring + listenAddresses []string + discoverer Discoverer + logger loghelper.LoggerWithContext + metricsRegisterer prometheus.Registerer + + hostMetrics *hostMetrics + + // Derived from keyring + id types.PeerID + tlsCert tls.Certificate + + // Host state + stateMu sync.Mutex + state hostState + + // Manage various subprocesses of host + subprocesses subprocesses.Subprocesses + ctx context.Context + cancel context.CancelFunc + + // Peers + peersMu sync.Mutex + peers map[types.PeerID]*peer +} + +// NewHost creates a new Host with the provided config, Ed25519 secret key, +// network listen address. A Discoverer is also provided to NewHost for +// discovering addresses of peers. +func NewHost( + config HostConfig, + keyring types.PeerKeyring, + listenAddresses []string, + discoverer Discoverer, + logger commontypes.Logger, + metricsRegisterer prometheus.Registerer, +) (*Host, error) { + if len(listenAddresses) == 0 { + return nil, fmt.Errorf("no listen addresses provided") + } + + id := types.PeerIDFromKeyring(keyring) + + tlsCert, err := mtls.NewMinimalX509CertFromKeyring(keyring) + if err != nil { + return nil, fmt.Errorf("failed to create certificate from keyring for host: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + return &Host{ + config, + keyring, + listenAddresses, + discoverer, + // peerID might already be set to the same value if we are managed, but we don't take any chances + loghelper.MakeRootLoggerWithContext(logger).MakeChild(commontypes.LogFields{"id": "ragep2p", "peerID": id}), + metricsRegisterer, + + newHostMetrics(metricsRegisterer, logger, id), + + id, + tlsCert, + + sync.Mutex{}, + hostStatePending, + + subprocesses.Subprocesses{}, + ctx, + cancel, + + sync.Mutex{}, + 
map[types.PeerID]*peer{}, + }, nil +} + +// Start listening on the network interfaces and dialling peers. +func (ho *Host) Start() error { + succeeded := false + defer func() { + if !succeeded { + ho.Close() + } + }() + ho.logger.Trace("ragep2p Start()", commontypes.LogFields{"listenAddresses": ho.listenAddresses}) + ho.stateMu.Lock() + defer ho.stateMu.Unlock() + + if ho.state != hostStatePending { + return fmt.Errorf("cannot Start() host that has already been started") + } + ho.state = hostStateOpen + + ho.subprocesses.Go(func() { + ho.dialLoop() + }) + for _, addr := range ho.listenAddresses { + ln, err := net.Listen("tcp", addr) + if err != nil { + return fmt.Errorf("net.Listen(%q) failed: %w", addr, err) + } + ho.subprocesses.Go(func() { + ho.listenLoop(ln) + }) + } + + err := ho.discoverer.Start(Wrapped(ho), ho.keyring, ho.logger) + if err != nil { + return fmt.Errorf("failed to start discoverer: %w", err) + } + + succeeded = true + return nil +} + +func remotePeerIDField(other types.PeerID) commontypes.LogFields { + return commontypes.LogFields{ + "remotePeerID": other, + } +} + +// Caller should hold peersMu. 
+func (ho *Host) findOrCreatePeer(other types.PeerID) *peer { + if _, ok := ho.peers[other]; !ok { + logger := ho.logger.MakeChild(remotePeerIDField(other)) + + metrics := newPeerMetrics(ho.metricsRegisterer, logger, ho.id, other) + + chDone := make(chan struct{}) + + chConnTerminated := make(chan struct{}) + // close so that we re-dial and establish a connection + close(chConnTerminated) + + mux := muxer.NewMuxer(logger) + demux := demuxer.NewDemuxer() + + chNewConnNotification := make(chan newConnNotification) + + chOtherStreamStateNotification := make(chan streamStateNotification) + chSelfStreamStateNotification := make(chan streamStateNotification) + + chStreamOpenRequest := make(chan peerStreamOpenRequest) + chStreamOpenResponse := make(chan peerStreamOpenResponse) + + chStreamUpdateLimitsRequest := make(chan peerStreamUpdateLimitsRequest) + chStreamUpdateLimitsResponse := make(chan peerStreamUpdateLimitsResponse) + + chStreamCloseRequest := make(chan peerStreamCloseRequest) + chStreamCloseResponse := make(chan peerStreamCloseResponse) + + incomingConnsLimiter := ratelimit.NewTokenBucket(incomingConnsRateLimit(ho.config.DurationBetweenDials), 4, true) + + rateLimitAggregator := ratelimitaggregator.NewAggregator(logger) + metrics.SetRateLimits(rateLimitAggregator.Aggregates()) + + p := peer{ + chDone, + + other, + logger, + + metrics, + + sync.Mutex{}, + incomingConnsLimiter, + + rateLimitAggregator, + + sync.Mutex{}, + peerConnLifeCycle{ + func() {}, + subprocesses.Subprocesses{}, + chConnTerminated, + }, + + mux, + demux, + + chNewConnNotification, + + chOtherStreamStateNotification, + chSelfStreamStateNotification, + + chStreamOpenRequest, + chStreamOpenResponse, + + chStreamUpdateLimitsRequest, + chStreamUpdateLimitsResponse, + + chStreamCloseRequest, + chStreamCloseResponse, + } + ho.peers[other] = &p + + ho.subprocesses.Go(func() { + peerLoop( + ho.ctx, + chDone, + rateLimitAggregator, + chNewConnNotification, + chOtherStreamStateNotification, + 
chSelfStreamStateNotification, + mux, + demux, + chStreamOpenRequest, + chStreamOpenResponse, + chStreamUpdateLimitsRequest, + chStreamUpdateLimitsResponse, + chStreamCloseRequest, + chStreamCloseResponse, + logger, + metrics, + ) + }) + } + return ho.peers[other] +} + +func peerLoop( + ctx context.Context, + chDone chan<- struct{}, + rateLimitAggregator *ratelimitaggregator.Aggregator, + chNewConnNotification <-chan newConnNotification, + chOtherStreamStateNotification <-chan streamStateNotification, + chSelfStreamStateNotification chan<- streamStateNotification, + mux *muxer.Muxer, + demux *demuxer.Demuxer, + chStreamOpenRequest <-chan peerStreamOpenRequest, + chStreamOpenResponse chan<- peerStreamOpenResponse, + chStreamUpdateLimitsRequest <-chan peerStreamUpdateLimitsRequest, + chStreamUpdateLimitsResponse chan<- peerStreamUpdateLimitsResponse, + chStreamCloseRequest <-chan peerStreamCloseRequest, + chStreamCloseResponse chan<- peerStreamCloseResponse, + logger loghelper.LoggerWithContext, + metrics *peerMetrics, +) { + defer close(chDone) + defer logger.Info("peerLoop exiting", nil) + + defer metrics.Close() + + type stream struct { + name string + priority stream2types.StreamPriority + messagesLimit, bytesLimit types.TokenBucketParams + } + streams := map[internaltypes.StreamID]stream{} + otherStreams := map[internaltypes.StreamID]struct{}{} + + var chConnTerminated <-chan struct{} + + pendingSelfStreamStateNotifications := map[internaltypes.StreamID]bool{} + var selfStreamStateNotification streamStateNotification + var chSelfStreamStateNotificationOrNil chan<- streamStateNotification + + for { + chSelfStreamStateNotificationOrNil = nil + // fake loop, we only perform zero or one iteration of this + for streamID, state := range pendingSelfStreamStateNotifications { + chSelfStreamStateNotificationOrNil = chSelfStreamStateNotification + selfStreamStateNotification = streamStateNotification{ + streamID, + streams[streamID].name, + state, + } + break + } + + 
select { + case chSelfStreamStateNotificationOrNil <- selfStreamStateNotification: + + delete(pendingSelfStreamStateNotifications, selfStreamStateNotification.streamID) + + // if the stream has been opened by the other end already, switch it on right away + if _, other := otherStreams[selfStreamStateNotification.streamID]; other && selfStreamStateNotification.open { + if !mux.EnableStream(selfStreamStateNotification.streamID) { + logger.Error("Assumption violation. Failed to enable stream on muxer. This stream may not work correctly.", commontypes.LogFields{ + "streamStateNotification": selfStreamStateNotification, + }) + } + } + + case notification := <-chNewConnNotification: + logger.Trace("New connection, creating pending notifications of all streams", nil) + + chConnTerminated = notification.chConnTerminated + for streamID := range streams { + pendingSelfStreamStateNotifications[streamID] = true + } + + case <-chConnTerminated: + chConnTerminated = nil + logger.Trace("Connection terminated, pausing all streams", nil) + + // Clear pending notifications + pendingSelfStreamStateNotifications = map[internaltypes.StreamID]bool{} + + // Reset streams on other side + otherStreams = map[internaltypes.StreamID]struct{}{} + + // Pause all streams on our side + for streamID := range streams { + if !mux.DisableStream(streamID) { + logger.Error("Assumption violation. Failed to disable stream on muxer. 
This stream may not work correctly.", commontypes.LogFields{ + "streamID": streamID, + }) + } + } + + logger.Trace("Connection terminated, paused all streams", nil) + + case notification := <-chOtherStreamStateNotification: + logger.Trace("Received stream state notification", commontypes.LogFields{ + "notification": notification, + }) + + _, other := otherStreams[notification.streamID] + if other == notification.open { + break + } + if notification.open { + otherStreams[notification.streamID] = struct{}{} + } else { + delete(otherStreams, notification.streamID) + } + if _, ok := streams[notification.streamID]; ok { + selfStreamName := streams[notification.streamID].name + if notification.open && selfStreamName != notification.streamName { + logger.Warn("Name mismatch between self and other stream", commontypes.LogFields{ + "localStreamName": selfStreamName, + "remoteStreamName": notification.streamName, + }) + } + if notification.open { + if !mux.EnableStream(notification.streamID) { + logger.Error("Assumption violation. Failed to enable stream on muxer. This stream may not work correctly.", commontypes.LogFields{ + "streamStateNotification": notification, + }) + } + } else { + if !mux.DisableStream(notification.streamID) { + logger.Error("Assumption violation. Failed to disable stream on muxer. This stream may not work correctly.", commontypes.LogFields{ + "streamStateNotification": notification, + }) + } + } + } + + case req := <-chStreamOpenRequest: + if _, ok := streams[req.streamID]; ok { + chStreamOpenResponse <- peerStreamOpenResponse{ + fmt.Errorf("stream already exists"), + } + } else if len(streams) >= MaxStreamsPerPeer { + chStreamOpenResponse <- peerStreamOpenResponse{ + fmt.Errorf("too many streams, expected at most %d", MaxStreamsPerPeer), + } + } else { + if !mux.AddStream(req.streamID, req.streamName, req.streamPriority, req.limits.MaxOutgoingBufferedMessages) { + logger.Error("Assumption violation. Failed to add already existing stream to muxer. 
This stream may not work correctly.", commontypes.LogFields{ + "streamOpenRequest": req, + }) + // let's try to fix the problem by removing and adding the stream again + _ = mux.RemoveStream(req.streamID) + _ = mux.AddStream(req.streamID, req.streamName, req.streamPriority, req.limits.MaxOutgoingBufferedMessages) + } + + if !demux.AddStream(req.streamID, req.limits.MaxIncomingBufferedMessages, req.limits.MaxMessageLength, req.limits.MessagesLimit, req.limits.BytesLimit) { + logger.Error("Assumption violation. Failed to add already existing stream to demuxer. This stream may not work correctly.", commontypes.LogFields{ + "streamOpenRequest": req, + }) + // let's try to fix the problem by removing and adding the stream again + demux.RemoveStream(req.streamID) + demux.AddStream(req.streamID, req.limits.MaxIncomingBufferedMessages, req.limits.MaxMessageLength, req.limits.MessagesLimit, req.limits.BytesLimit) + } + + rateLimitAggregator.AddStream(req.limits.MessagesLimit, req.limits.BytesLimit) + metrics.SetRateLimits(rateLimitAggregator.Aggregates()) + + streams[req.streamID] = stream{ + req.streamName, + req.streamPriority, + req.limits.MessagesLimit, + req.limits.BytesLimit, + } + if chConnTerminated != nil { + pendingSelfStreamStateNotifications[req.streamID] = true + } + chStreamOpenResponse <- peerStreamOpenResponse{ + nil, + } + } + + case req := <-chStreamUpdateLimitsRequest: + if oldS, ok := streams[req.streamID]; ok { + s := stream{ + oldS.name, + oldS.priority, + req.limits.MessagesLimit, + req.limits.BytesLimit, + } + streams[req.streamID] = s + + if !mux.UpdateStream(req.streamID, req.limits.MaxOutgoingBufferedMessages) { + logger.Error("Assumption violation. Failed to update stream on muxer. 
This stream may not work correctly.", commontypes.LogFields{ + "streamUpdateLimitsRequest": req, + }) + // let's try to fix the problem by removing and adding the stream again + _ = mux.RemoveStream(req.streamID) + _ = mux.AddStream(req.streamID, s.name, s.priority, req.limits.MaxOutgoingBufferedMessages) + } + + if !demux.UpdateStream( + req.streamID, + req.limits.MaxIncomingBufferedMessages, + req.limits.MaxMessageLength, + req.limits.MessagesLimit, + req.limits.BytesLimit, + ) { + logger.Error("Assumption violation. Failed to update stream on demuxer. This stream may not work correctly.", commontypes.LogFields{ + "streamUpdateLimitsRequest": req, + }) + // let's try to fix the problem by removing and adding the stream again + demux.RemoveStream(req.streamID) + _ = demux.AddStream(req.streamID, req.limits.MaxIncomingBufferedMessages, req.limits.MaxMessageLength, req.limits.MessagesLimit, req.limits.BytesLimit) + } + + rateLimitAggregator.RemoveStream(oldS.messagesLimit, oldS.bytesLimit) + rateLimitAggregator.AddStream(s.messagesLimit, s.bytesLimit) + metrics.SetRateLimits(rateLimitAggregator.Aggregates()) + + chStreamUpdateLimitsResponse <- peerStreamUpdateLimitsResponse{nil} + } else { + chStreamUpdateLimitsResponse <- peerStreamUpdateLimitsResponse{ + fmt.Errorf("stream not found"), + } + } + + case req := <-chStreamCloseRequest: + if s, ok := streams[req.streamID]; ok { + if !mux.RemoveStream(req.streamID) { + logger.Error("Assumption violation. Failed to remove stream from muxer. 
Proceeding anyways because what else can I do...", commontypes.LogFields{ + "streamCloseRequest": req, + }) + } + + demux.RemoveStream(req.streamID) + + rateLimitAggregator.RemoveStream(s.messagesLimit, s.bytesLimit) + metrics.SetRateLimits(rateLimitAggregator.Aggregates()) + + delete(streams, req.streamID) + if chConnTerminated != nil { + pendingSelfStreamStateNotifications[req.streamID] = false + } + chStreamCloseResponse <- peerStreamCloseResponse{ + len(streams) == 0, + nil, + } + + if len(streams) == 0 { + return + } + } else { + chStreamCloseResponse <- peerStreamCloseResponse{ + false, + fmt.Errorf("stream not found"), + } + } + + case <-ctx.Done(): + return + } + } +} + +// Close stops listening on the network interface(s) and closes all active +// streams. +func (ho *Host) Close() error { + ho.stateMu.Lock() + defer ho.stateMu.Unlock() + + if ho.state != hostStateOpen { + return fmt.Errorf("cannot Close() host that isn't open") + } + ho.logger.Info("Host closing discoverer", nil) + err := ho.discoverer.Close() + ho.logger.Info("Host winding down", nil) + ho.state = hostStateClosed + ho.cancel() + ho.subprocesses.Wait() + ho.hostMetrics.Close() + ho.logger.Info("Host exiting", nil) + if err != nil { + return fmt.Errorf("failed to close discoverer: %w", err) + } + return nil +} + +func (ho *Host) ID() types.PeerID { + return ho.id +} + +func (ho *Host) dialLoop() { + type dialState struct { + next uint + } + dialStates := make(map[types.PeerID]*dialState) + for { + var dialProcesses subprocesses.Subprocesses + ho.peersMu.Lock() + peers := make([]*peer, 0, len(ho.peers)) + for pid, p := range ho.peers { + peers = append(peers, p) + if dialStates[pid] == nil { + dialStates[pid] = &dialState{0} + } + } + // Some peers may have been discarded, garbage collect dial states + for pid := range dialStates { + if ho.peers[pid] == nil { + delete(dialStates, pid) + } + } + ho.peersMu.Unlock() + for _, p := range peers { + ds := dialStates[p.other] + 
dialProcesses.Go(func() { + p.connLifeCycleMu.Lock() + chConnTerminated := p.connLifeCycle.chConnTerminated + p.connLifeCycleMu.Unlock() + select { + case <-chConnTerminated: + p.logger.Debug("Dialing", nil) + default: + p.logger.Trace("Dial skip", nil) + return + } + + addresses, err := ho.discoverer.FindPeer(p.other) + if err != nil { + p.logger.Warn("Discoverer error", commontypes.LogFields{"error": err}) + return + } + if len(addresses) == 0 { + p.logger.Warn("Discoverer found no addresses", nil) + return + } + + address := string(addresses[ds.next%uint(len(addresses))]) + + // We used to increment this only on dial error but a connection might fail after the Dial itself has + // succeeded (eg. this happens with self-dials where the connection is reset after the incorrect knock + // is received). Tracking an error so far down the stack is much harder so increment every time to give + // a fair chance to every address. + ds.next++ + + logger := p.logger.MakeChild(commontypes.LogFields{"direction": "out", "remoteAddr": address}) + + dialer := net.Dialer{ + Timeout: ho.config.DurationBetweenDials, + } + conn, err := dialer.DialContext(ho.ctx, "tcp", address) + if err != nil { + logger.Warn("Dial error", commontypes.LogFields{"error": err}) + return + } + + logger.Trace("Dial succeeded", nil) + ho.subprocesses.Go(func() { + ho.handleOutgoingConnection(conn, p.other, logger) + }) + }) + + } + dialProcesses.Wait() + + select { + //case <-time.After(5 * time.Second): // good for testing simultaneous dials, real version is on next line + case <-time.After(ho.config.DurationBetweenDials + time.Duration(rand.Float32()*float32(ho.config.DurationBetweenDials))): + case <-ho.ctx.Done(): + ho.logger.Trace("Host.dialLoop exiting", nil) + return + } + } +} + +func (ho *Host) listenLoop(ln net.Listener) { + ho.subprocesses.Go(func() { + <-ho.ctx.Done() + if err := ln.Close(); err != nil { + ho.logger.Warn("Failed to close listener", commontypes.LogFields{"error": err}) + } + }) 
+ + for { + conn, err := ln.Accept() + ho.hostMetrics.inboundDialsTotal.Inc() + if err != nil { + ho.logger.Info("Exiting Host.listenLoop due to error while Accepting", commontypes.LogFields{"error": err}) + return + } + ho.subprocesses.Go(func() { + ho.handleIncomingConnection(conn) + }) + } +} + +func (ho *Host) handleOutgoingConnection(conn net.Conn, other types.PeerID, logger loghelper.LoggerWithContext) { + shouldClose := true + defer func() { + if shouldClose { + if err := safeClose(conn); err != nil { + logger.Warn("Failed to close outgoing connection", commontypes.LogFields{"error": err}) + } + } + }() + + knck, err := knock.BuildKnock(other, ho.id, ho.keyring) + if err != nil { + logger.Warn("Error while building knock", commontypes.LogFields{"error": err}) + return + } + if err := conn.SetWriteDeadline(time.Now().Add(netTimeout)); err != nil { + logger.Warn("Closing connection, error during SetWriteDeadline", commontypes.LogFields{"error": err}) + return + } + if _, err := conn.Write(knck); err != nil { + logger.Warn("Error while sending knock", commontypes.LogFields{"error": err}) + return + } + + ho.peersMu.Lock() + peer, ok := ho.peers[other] + ho.peersMu.Unlock() + if !ok { + // peer must have been deleted in the time between the dial being + // started and now + return + } + + shouldClose = false + + overheadAwareConn := overheadawareconn.NewOverheadAwareConn( + conn, + peer.metrics.rawconnReadBytesTotal, + peer.metrics.rawconnWrittenBytesTotal, + ) + + tlsConfig := newTLSConfig( + ho.tlsCert, + mtls.VerifyCertMatchesPubKey(other), + ) + tlsConn := tls.Client(overheadAwareConn, tlsConfig) + ho.handleConnection(false, overheadAwareConn, tlsConn, peer, logger) +} + +func (ho *Host) handleIncomingConnection(conn net.Conn) { + remoteAddrLogFields := commontypes.LogFields{"direction": "in", "remoteAddr": conn.RemoteAddr()} + logger := ho.logger.MakeChild(remoteAddrLogFields) + shouldClose := true + defer func() { + if shouldClose { + if err := 
safeClose(conn); err != nil { + logger.Warn("Failed to close incoming connection", commontypes.LogFields{"error": err}) + } + } + }() + + knck := make([]byte, knock.KnockSize) + if err := conn.SetReadDeadline(time.Now().Add(netTimeout)); err != nil { + logger.Warn("Closing connection, error during SetReadDeadline", commontypes.LogFields{"error": err}) + return + } + n, err := conn.Read(knck) + if err != nil { + logger.Warn("Error while reading knock", commontypes.LogFields{"error": err}) + return + } + if n != knock.KnockSize { + logger.Warn("Knock too short", nil) + return + } + + other, err := knock.VerifyKnock(ho.id, knck) + if err != nil { + if errors.Is(err, knock.ErrFromSelfDial) { + logger.Info("Self-dial knock, dropping connection. Someone has likely misconfigured their announce addresses.", nil) + } else { + logger.Warn("Invalid knock", commontypes.LogFields{"error": err}) + } + return + } + + ho.peersMu.Lock() + peer, ok := ho.peers[*other] + ho.peersMu.Unlock() + if !ok { + logger.Warn("Received incoming connection from an unknown peer, closing", remotePeerIDField(*other)) + return + } + logger = peer.logger.MakeChild(remoteAddrLogFields) // introduce remotePeerID in our logs since we now know it + overheadAwareConn := overheadawareconn.NewOverheadAwareConn( + conn, + peer.metrics.rawconnReadBytesTotal, + peer.metrics.rawconnWrittenBytesTotal, + ) + + shouldClose = false + + tlsConfig := newTLSConfig( + ho.tlsCert, + mtls.VerifyCertMatchesPubKey(*other), + ) + tlsConn := tls.Server(overheadAwareConn, tlsConfig) + ho.handleConnection(true, overheadAwareConn, tlsConn, peer, logger) +} + +func (ho *Host) handleConnection( + incoming bool, + overheadAwareConn *overheadawareconn.OverheadAwareConn, + tlsConn *tls.Conn, + peer *peer, + logger loghelper.LoggerWithContext, +) { + shouldClose := true + defer func() { + if shouldClose { + if err := safeClose(tlsConn); err != nil { + logger.Warn("Failed to close connection", commontypes.LogFields{"error": err}) + } 
+ } + }() + + // Handshake reads and write to the connection. Set a deadline to prevent tarpitting + if err := tlsConn.SetDeadline(time.Now().Add(netTimeout)); err != nil { + logger.Warn("Closing connection, error during SetDeadline", commontypes.LogFields{"error": err}) + return + } + // Perform handshake so that we know the public key + if err := tlsConn.Handshake(); err != nil { + logger.Warn("Closing connection, error during Handshake", commontypes.LogFields{"error": err}) + return + } + // Disable deadline. Whoever uses the connection next will have to set their own timeouts. + if err := tlsConn.SetDeadline(time.Time{}); err != nil { + logger.Warn("Closing connection, error during SetDeadline", commontypes.LogFields{"error": err}) + return + } + + // get public key + pubKey, err := mtls.PubKeyFromCert(tlsConn.ConnectionState().PeerCertificates[0]) + if err != nil { + logger.Warn("Closing connection, error getting public key", commontypes.LogFields{"error": err}) + return + } + if peer.other != types.PeerIDFromPeerPublicKey(pubKey) { + logger.Warn("TLS handshake PeerID mismatch", commontypes.LogFields{ + "expected": peer.other, + "actual": types.PeerIDFromPeerPublicKey(pubKey), + }) + return + } + + if incoming { + peer.incomingConnsLimiterMu.Lock() + allowed := peer.incomingConnsLimiter.RemoveTokens(1) + peer.incomingConnsLimiterMu.Unlock() + if !allowed { + logger.Warn("Incoming connection rate limited", nil) + return + } + } + + overheadAwareConn.SetupComplete() + + logger.Info("Connection established", nil) + peer.metrics.connEstablishedTotal.Inc() + if incoming { + peer.metrics.connEstablishedInboundTotal.Inc() + } + + // the lock here ensures there is at most one active connection at any time. + // it also prevents races on connLifeCycle.connSubs. 
+ peer.connLifeCycleMu.Lock() + peer.connLifeCycle.connCancel() + peer.connLifeCycle.connSubs.Wait() + connCtx, connCancel := context.WithCancel(ho.ctx) + chConnTerminated := make(chan struct{}) + peer.connLifeCycle.connCancel = connCancel + peer.connLifeCycle.chConnTerminated = chConnTerminated + peer.connLifeCycle.connSubs.Go(func() { + defer connCancel() + authenticatedConnectionLoop( + connCtx, + overheadAwareConn, + tlsConn, + peer.chOtherStreamStateNotification, + peer.chSelfStreamStateNotification, + peer.mux, + peer.demux, + chConnTerminated, + logger, + peer.metrics, + ) + }) + peer.connLifeCycleMu.Unlock() + + select { + case peer.chNewConnNotification <- newConnNotification{chConnTerminated}: + // keep the connection + shouldClose = false + case <-peer.chDone: + case <-ho.ctx.Done(): + } +} + +// NewStream creates a new bidirectional stream with peer other for streamName. +// It is parameterized with a maxMessageLength, the maximum size of a message in +// bytes and two parameters for rate limiting. +// +// Deprecated: Please switch to NewStream2. +func (ho *Host) NewStream( + other types.PeerID, + streamName string, + outgoingBufferSize int, // number of messages that fit in the outgoing buffer + incomingBufferSize int, // number of messages that fit in the incoming buffer + maxMessageLength int, + messagesLimit types.TokenBucketParams, // rate limit for (the number of) incoming messages + bytesLimit types.TokenBucketParams, // rate limit for (the accumulated size in bytes of) incoming messages +) (*Stream, error) { + stream2, err := ho.NewStream2( + other, + streamName, + stream2types.StreamPriorityDefault, + Stream2Limits{ + outgoingBufferSize, + incomingBufferSize, + maxMessageLength, + messagesLimit, + bytesLimit, + }, + ) + if err != nil { + return nil, err + } + + return newStreamFromStream2(stream2) +} + +// NewStream2 creates a new bidirectional stream with peer other for streamName. 
+// It is parameterized with a maxMessageLength, the maximum size of a message in +// bytes and two parameters for rate limiting. Compared to Stream, Stream2 +// introduces an additional parameter: the message priority level. +func (ho *Host) NewStream2( + other types.PeerID, + streamName string, + priority stream2types.StreamPriority, + limits Stream2Limits, +) (Stream2, error) { + if other == ho.id { + return nil, fmt.Errorf("stream with self is forbidden") + } + + if len(streamName) == 0 { + return nil, fmt.Errorf("streamName cannot be empty") + } + if types.MaxStreamNameLength < len(streamName) { + return nil, fmt.Errorf("streamName '%v' is longer than maximum length %v", streamName, types.MaxStreamNameLength) + } + + validatedLimits, err := limits.Validate() + if err != nil { + return nil, err + } + + ho.peersMu.Lock() + defer ho.peersMu.Unlock() + p := ho.findOrCreatePeer(other) + + sid := internaltypes.MakeStreamID(ho.id, other, streamName) + + var response peerStreamOpenResponse + select { + // it's important that we hold peersMu here. 
otherwise the peer could have + // shut down and we'd block on the send until the host is shut down + case p.chStreamOpenRequest <- peerStreamOpenRequest{ + sid, + streamName, + priority, + validatedLimits, + }: + response = <-p.chStreamOpenResponse + if response.err != nil { + return nil, response.err + } + case <-ho.ctx.Done(): + return nil, fmt.Errorf("host shut down") + } + + streamLogger := loghelper.MakeRootLoggerWithContext(p.logger).MakeChild(commontypes.LogFields{ + "streamID": sid, + "streamName": streamName, + }) + + ctx, cancel := context.WithCancel(ho.ctx) + s := stream2{ + sync.Mutex{}, + false, + + streamName, + other, + sid, + + limits.MaxOutgoingBufferedMessages, + limits.MaxMessageLength, + ho, + + subprocesses.Subprocesses{}, + ctx, + cancel, + streamLogger, + make(chan InboundBinaryMessage, 1), + + p.mux, + p.demux, + + p.chStreamUpdateLimitsRequest, + p.chStreamUpdateLimitsResponse, + + p.chStreamCloseRequest, + p.chStreamCloseResponse, + } + + s.subprocesses.Go(func() { + s.receiveLoop() + }) + + streamLogger.Info("NewStream2 succeeded", commontypes.LogFields{ + "maxOutgoingBufferedMessages": limits.MaxOutgoingBufferedMessages, + "maxIncomingBufferedMessages": limits.MaxIncomingBufferedMessages, + "maxMessageLength": limits.MaxMessageLength, + "messagesLimit": limits.MessagesLimit, + "bytesLimit": limits.BytesLimit, + }) + + return &s, nil +} + +///////////////////////////////////////////// +// authenticated connection handling +////////////////////////////////////////////// + +func authenticatedConnectionLoop( + ctx context.Context, + overheadAwareConn *overheadawareconn.OverheadAwareConn, + conn net.Conn, + chOtherStreamStateNotification chan<- streamStateNotification, + chSelfStreamStateNotification <-chan streamStateNotification, + mux *muxer.Muxer, + demux *demuxer.Demuxer, + chTerminated chan<- struct{}, + logger loghelper.LoggerWithContext, + metrics *peerMetrics, +) { + defer func() { + close(chTerminated) + 
logger.Info("authenticatedConnectionLoop: exited", nil) + }() + + var subs subprocesses.Subprocesses + defer subs.Wait() + + defer func() { + if err := safeClose(conn); err != nil { + logger.Warn("Failed to close connection", commontypes.LogFields{"error": err}) + } + }() + + childCtx, childCancel := context.WithCancel(ctx) + defer childCancel() + + chReadTerminated := make(chan struct{}) + subs.Go(func() { + authenticatedConnectionReadLoop( + childCtx, + overheadAwareConn, + conn, + chOtherStreamStateNotification, + demux, + chReadTerminated, + logger, + metrics, + ) + }) + + chWriteTerminated := make(chan struct{}) + subs.Go(func() { + authenticatedConnectionWriteLoop( + childCtx, + conn, + chSelfStreamStateNotification, + mux, + demux, // added for request/response tracking + chWriteTerminated, + logger, + metrics, + ) + }) + + select { + case <-ctx.Done(): + case <-chReadTerminated: + case <-chWriteTerminated: + } + + logger.Info("authenticatedConnectionLoop: winding down", nil) +} + +func authenticatedConnectionReadLoop( + ctx context.Context, + overheadAwareConn *overheadawareconn.OverheadAwareConn, + conn net.Conn, + chOtherStreamStateNotification chan<- streamStateNotification, + demux *demuxer.Demuxer, + chReadTerminated chan<- struct{}, + logger loghelper.LoggerWithContext, + metrics *peerMetrics, +) { + defer close(chReadTerminated) + + readInternal := func(buf []byte) bool { + _, err := io.ReadFull(conn, buf) + if err != nil { + logger.Warn("Error reading from connection", commontypes.LogFields{"error": err}) + return false + } + + metrics.connReadProcessedBytesTotal.Add(float64(len(buf))) + + if err := overheadAwareConn.AddDeliveredApplicationDataBytes(len(buf)); err != nil { + logger.Warn("OverheadAwareConn is asking us to drop the connection", commontypes.LogFields{"error": err}) + return false + } + + return true + } + + skipInternal := func(n int) bool { + r, err := io.Copy(io.Discard, io.LimitReader(conn, int64(n))) + if err != nil || r != 
int64(n) { + logger.Warn("Error reading from connection", commontypes.LogFields{"error": err}) + return false + } + + metrics.connReadSkippedBytesTotal.Add(float64(n)) + + if err := overheadAwareConn.AddDeliveredApplicationDataBytes(n); err != nil { + logger.Warn("OverheadAwareConn is asking us to drop the connection", commontypes.LogFields{"error": err}) + return false + } + + return true + } + + // We taper some logs to prevent an adversary from spamming our logs + limitsExceededTaper := loghelper.LogarithmicTaper{} + // Note that we never reset this taper. There shouldn't be many messages + // with unknown stream id. + unknownStreamIDTaper := loghelper.LogarithmicTaper{} + + // We keep track of stream names for logging. + // Note that entries in this map are not checked for truthfulness, the remote + // could lie about the stream name. + remoteStreamNameByID := make(map[internaltypes.StreamID]string) + + logWithHeaderInternal := func(header frame.FrameHeader) commontypes.Logger { + return logger.MakeChild(commontypes.LogFields{ + "payloadLength": header.GetPayloadSize(), + "streamID": header.GetStreamID(), + "remoteStreamName": remoteStreamNameByID[header.GetStreamID()], + }) + } + + logNegativeDemuxResultInternal := func(header frame.FrameHeader, demuxShouldPushResult demuxer.ShouldPushResult) { + switch demuxShouldPushResult { + case demuxer.ShouldPushResultMessageTooBig: + limitsExceededTaper.Trigger(func(count uint64) { + logWithHeaderInternal(header).Warn("authenticatedConnectionReadLoop: message too big, dropping message", commontypes.LogFields{ + "limitsExceededDroppedCount": count, + }) + }) + case demuxer.ShouldPushResultMessagesLimitExceeded: + limitsExceededTaper.Trigger(func(count uint64) { + logWithHeaderInternal(header).Warn("authenticatedConnectionReadLoop: message limit exceeded, dropping message", commontypes.LogFields{ + "limitsExceededDroppedCount": count, + }) + }) + case demuxer.ShouldPushResultBytesLimitExceeded: + 
limitsExceededTaper.Trigger(func(count uint64) { + logWithHeaderInternal(header).Warn("authenticatedConnectionReadLoop: bytes limit exceeded, dropping message", commontypes.LogFields{ + "limitsExceededDroppedCount": count, + }) + }) + case demuxer.ShouldPushResultUnknownStream: + unknownStreamIDTaper.Trigger(func(count uint64) { + logWithHeaderInternal(header).Warn("authenticatedConnectionReadLoop: unknown stream id, dropping message", commontypes.LogFields{ + "unknownStreamIDDroppedCount": count, + }) + }) + case demuxer.ShouldPushResultResponseRejected: + limitsExceededTaper.Trigger(func(count uint64) { + logWithHeaderInternal(header).Warn( + "authenticatedConnectionReadLoop: response rejected, dropping message", commontypes.LogFields{ + "limitsExceededDroppedCount": count, + }, + ) + }) + case demuxer.ShouldPushResultYes: + logger.Critical("authenticatedConnectionReadLoop.logNegativeDemuxResultInternal should never hit shouldPushResultYes", nil) + } + } + + demuxPushInternal := func(header frame.FrameHeader, msg InboundBinaryMessage) { + switch demux.PushMessage(header.GetStreamID(), msg) { + case demuxer.PushResultDropped: + logWithHeaderInternal(header).Trace("authenticatedConnectionReadLoop: demuxer is overflowing for stream, dropping oldest message", nil) + case demuxer.PushResultUnknownStream: + unknownStreamIDTaper.Trigger(func(count uint64) { + logWithHeaderInternal(header).Warn("authenticatedConnectionReadLoop: unknown stream id, dropping message", commontypes.LogFields{ + "unknownStreamIDDroppedCount": count, + }) + }) + case demuxer.PushResultSuccess: + } + } + + // We keep track of the number of open & close frames that we have received. 
+ openCloseFramesReceived := 0 + const maxOpenCloseFramesReceived = 2 * MaxStreamsPerPeer + + maxOpenCloseFramesExceededInternal := func(streamID internaltypes.StreamID, payloadSize int) bool { + if openCloseFramesReceived <= maxOpenCloseFramesReceived { + return false + } + + childLogger := logger.MakeChild(commontypes.LogFields{ + "payloadLength": payloadSize, + "streamID": streamID, + "remoteStreamName": remoteStreamNameByID[streamID], + }) + childLogger.Warn("authenticatedConnectionReadLoop: peer received too many open/close frames, closing connection", + commontypes.LogFields{ + "maxOpenCloseFramesReceived": maxOpenCloseFramesReceived, + }) + return true + } + + frameHeaderReader := frame.MakeFrameHeaderReader(readInternal) + + for { + header, err := frameHeaderReader.ReadFrameHeader() + if err != nil { + if errors.Is(err, frame.ErrReadFrameHeaderReadFailed) { + // no need to log, readInternal will have already done so + } else { + logger.Warn("authenticatedConnectionReadLoop: could not read frame header, closing connection", commontypes.LogFields{ + "error": err, + }) + } + return + } + + switch header := header.(type) { + case frame.OpenStreamFrameHeader: + openCloseFramesReceived++ + + streamName := make([]byte, header.PayloadSize) + if !readInternal(streamName) { + return + } + remoteStreamNameByID[header.StreamID] = string(streamName) + + select { + case chOtherStreamStateNotification <- streamStateNotification{header.StreamID, string(streamName), true}: + case <-ctx.Done(): + return + } + + if maxOpenCloseFramesExceededInternal(header.StreamID, header.PayloadSize) { + return + } + case frame.CloseStreamFrameHeader: + openCloseFramesReceived++ + + delete(remoteStreamNameByID, header.StreamID) + select { + case chOtherStreamStateNotification <- streamStateNotification{header.StreamID, "", false}: + case <-ctx.Done(): + return + } + + if maxOpenCloseFramesExceededInternal(header.StreamID, 0) { + return + } + case frame.MessagePlainFrameHeader: + 
demuxShouldPushResult := demux.ShouldPush(header.StreamID, header.PayloadSize) + switch demuxShouldPushResult { + case demuxer.ShouldPushResultMessagesLimitExceeded, demuxer.ShouldPushResultBytesLimitExceeded, demuxer.ShouldPushResultUnknownStream, demuxer.ShouldPushResultResponseRejected, demuxer.ShouldPushResultMessageTooBig: + logNegativeDemuxResultInternal(header, demuxShouldPushResult) + if !skipInternal(header.PayloadSize) { + return + } + case demuxer.ShouldPushResultYes: + payload := make([]byte, header.PayloadSize) + if !readInternal(payload) { + return + } + demuxPushInternal( + header, + InboundBinaryMessagePlain{payload}, + ) + } + case frame.MessageRequestFrameHeader: + demuxShouldPushResult := demux.ShouldPush(header.StreamID, header.PayloadSize) + switch demuxShouldPushResult { + case demuxer.ShouldPushResultMessagesLimitExceeded, demuxer.ShouldPushResultBytesLimitExceeded, demuxer.ShouldPushResultUnknownStream, demuxer.ShouldPushResultResponseRejected, demuxer.ShouldPushResultMessageTooBig: + logNegativeDemuxResultInternal(header, demuxShouldPushResult) + if !skipInternal(header.PayloadSize) { + return + } + case demuxer.ShouldPushResultYes: + payload := make([]byte, header.PayloadSize) + if !readInternal(payload) { + return + } + demuxPushInternal( + header, + InboundBinaryMessageRequest{RequestHandle(header.RequestID), payload}, + ) + } + case frame.MessageResponseFrameHeader: + demuxShouldPushResult := demux.ShouldPushResponse(header.StreamID, header.RequestID, header.PayloadSize) + switch demuxShouldPushResult { + case demuxer.ShouldPushResultMessagesLimitExceeded, demuxer.ShouldPushResultBytesLimitExceeded, demuxer.ShouldPushResultUnknownStream, demuxer.ShouldPushResultResponseRejected, demuxer.ShouldPushResultMessageTooBig: + logNegativeDemuxResultInternal(header, demuxShouldPushResult) + if !skipInternal(header.PayloadSize) { + return + } + case demuxer.ShouldPushResultYes: + limitsExceededTaper.Reset(func(oldCount uint64) { + 
logWithHeaderInternal(header).Info("authenticatedConnectionReadLoop: limits are no longer being exceeded", commontypes.LogFields{ + "droppedCount": oldCount, + }) + }) + + payload := make([]byte, header.PayloadSize) + if !readInternal(payload) { + return + } + demuxPushInternal( + header, + InboundBinaryMessageResponse{payload}, + ) + } + default: + panic("unknown type of frame.FrameHeader") + } + } +} + +func authenticatedConnectionWriteLoop( + ctx context.Context, + conn net.Conn, + chSelfStreamStateNotification <-chan streamStateNotification, + mux *muxer.Muxer, + demux *demuxer.Demuxer, + chWriteTerminated chan<- struct{}, + logger loghelper.LoggerWithContext, + metrics *peerMetrics, +) { + writeInternal := func(buf []byte) bool { + _, err := conn.Write(buf) + if err != nil { + logger.Warn("Error writing to connection", commontypes.LogFields{"error": err}) + // shut everything down + if err := safeClose(conn); err != nil { + logger.Warn("Failed to close connection", commontypes.LogFields{"error": err}) + } + close(chWriteTerminated) + return false + } + metrics.connWrittenBytesTotal.Add(float64(len(buf))) + return true + } + + // If two bufs are smallish, try to coalesce the two writes since each write + // to the underlying TLS conn will emit a new TLS record. + // 2048 bytes since TLS per-(application data)-record overhead for us is 22 bytes. + // When we exceed that size we lose only ~1% to the additional record's overhead. + writeTwoInternalBuf := make([]byte, 0, 2048) + writeTwoInternal := func(buf1 []byte, buf2 []byte) bool { + if len(buf1)+len(buf2) <= cap(writeTwoInternalBuf) { + writeTwoInternalBuf = writeTwoInternalBuf[:0] + writeTwoInternalBuf = append(writeTwoInternalBuf, buf1...) + writeTwoInternalBuf = append(writeTwoInternalBuf, buf2...) 
+ return writeInternal(writeTwoInternalBuf) + } else { + return writeInternal(buf1) && writeInternal(buf2) + } + } + + sendInternal := func(streamID internaltypes.StreamID, message OutboundBinaryMessage) bool { + if err := conn.SetWriteDeadline(time.Now().Add(netTimeout)); err != nil { + logger.Warn("Closing connection, error during SetWriteDeadline", commontypes.LogFields{"error": err}) + return false + } + + // The header's value is set based on the type of message in the switch statement below. + var header []byte + var payload []byte + + switch m := message.(type) { + case OutboundBinaryMessageRequest: + requestID, err := internaltypes.MakeRandomRequestID() + if err != nil { + logger.Error("Error while sending request (failed to generate random request id)", commontypes.LogFields{ + "error": err, + }) + return false + } + demux.SetPolicy(streamID, requestID, m.ResponsePolicy) + header = frame.MessageRequestFrameHeader{streamID, len(m.Payload), requestID}.Encode() + payload = m.Payload + + case OutboundBinaryMessageResponse: + header = frame.MessageResponseFrameHeader{ + streamID, + len(m.Payload), + stream2types.RequestIDOfOutboundBinaryMessageResponse(m), + }.Encode() + payload = m.Payload + + case OutboundBinaryMessagePlain: + header = frame.MessagePlainFrameHeader{streamID, len(m.Payload)}.Encode() + payload = m.Payload + } + + if !writeTwoInternal( + header, + payload, + ) { + return false + } + metrics.messageBytes.Observe(float64(len(payload))) + return true + } + + handleStreamStateNotificationsInternal := func(notification streamStateNotification) bool { + if err := conn.SetWriteDeadline(time.Now().Add(netTimeout)); err != nil { + logger.Warn("Closing connection, error during SetWriteDeadline", commontypes.LogFields{"error": err}) + return false + } + if notification.open { + streamName := []byte(notification.streamName) + if !writeTwoInternal( + frame.OpenStreamFrameHeader{notification.streamID, len(streamName)}.Encode(), + streamName, + ) { + return 
false + } + } else { + if !writeInternal(frame.CloseStreamFrameHeader{notification.streamID}.Encode()) { + return false + } + } + return true + } + + for { + select { + case <-ctx.Done(): + return + + case notification := <-chSelfStreamStateNotification: + if !handleStreamStateNotificationsInternal(notification) { + return + } + continue + + case <-mux.SignalMaybePending(): + } + + for { + select { + case <-ctx.Done(): + return + + case notification := <-chSelfStreamStateNotification: + if !handleStreamStateNotificationsInternal(notification) { + return + } + + default: + } + + msg, sid := mux.Pop() + if msg == nil { + break + } + if !sendInternal(sid, msg) { + return + } + } + } +} + +// gotta be careful about closing tls connections to make sure we don't get +// tarpitted +func safeClose(conn net.Conn) error { + // This isn't needed in more recent versions of go, but better safe than sorry! + errDeadline := conn.SetWriteDeadline(time.Now().Add(netTimeout)) + errClose := conn.Close() + if errClose != nil { + return errClose + } + if errDeadline != nil { + return errDeadline + } + return nil +} + +func incomingConnsRateLimit(durationBetweenDials time.Duration) ratelimit.MillitokensPerSecond { + // 2 dials per DurationBetweenDials are okay + result := ratelimit.MillitokensPerSecond(2.0 / durationBetweenDials.Seconds() * 1000.0) + // dialing once every two seconds is always okay + if result < 500 { + result = 500 + } + return result +} + +// Discoverer is responsible for discovering the addresses of peers on the network. 
+type Discoverer interface {
+	Start(host ragep2pwrapper.Host, keyring types.PeerKeyring, logger loghelper.LoggerWithContext) error
+	Close() error
+	FindPeer(peer types.PeerID) ([]types.Address, error)
+}
diff --git a/ragep2p/ragep2pnew/stream.go b/ragep2p/ragep2pnew/stream.go
new file mode 100644
index 00000000..4f29893d
--- /dev/null
+++ b/ragep2p/ragep2pnew/stream.go
@@ -0,0 +1,95 @@
+package ragep2pnew
+
+import (
+	"fmt"
+
+	"github.com/smartcontractkit/libocr/commontypes"
+	"github.com/smartcontractkit/libocr/internal/loghelper"
+	"github.com/smartcontractkit/libocr/ragep2p/types"
+)
+
+// Deprecated: Please switch to Stream2.
+type Stream struct {
+	stream2   *stream2
+	chReceive chan []byte
+}
+
+// Helper function for initializing a legacy Stream using a new Stream2.
+func newStreamFromStream2(wrappedStream2 Stream2) (*Stream, error) {
+	rawStream2, ok := wrappedStream2.(*stream2)
+	if !ok {
+		return nil, fmt.Errorf("assumption violation: wrappedStream2 is not of type *stream2")
+	}
+	stream := &Stream{
+		rawStream2,
+		make(chan []byte, 5),
+	}
+	go stream.receiveForwardingLoop()
+	return stream, nil
+}
+
+// Other returns the peer ID of the stream counterparty.
+func (st *Stream) Other() types.PeerID {
+	return st.stream2.Other()
+}
+
+// Name returns the name of the stream.
+func (st *Stream) Name() string {
+	return st.stream2.Name()
+}
+
+// Best effort sending of messages. May fail without returning an error.
+func (st *Stream) SendMessage(data []byte) {
+	st.stream2.Send(OutboundBinaryMessagePlain{data})
+}
+
+// Best effort receiving of messages. The returned channel will be closed when
+// the stream is closed. Note that this function may return the same channel
+// across invocations.
+func (st *Stream) ReceiveMessages() <-chan []byte {
+	// Here, return the st.chReceive (instead of type incompatible st.stream2.chReceive).
+	// See newStreamFromStream2(...), which starts a goroutine forwarding all messages from st.stream2.chReceive to st.chReceive.
+ return st.chReceive +} + +// Close the stream. This closes any channel returned by ReceiveMessages earlier. +// After close the stream cannot be reopened. If the stream is needed in the +// future it should be created again through NewStream. +// After close, any messages passed to SendMessage will be dropped. +func (st *Stream) Close() error { + return st.stream2.Close() +} + +// Implements forwarding of received messages as workaround for incompatibility of the chReceive types. +func (st *Stream) receiveForwardingLoop() { + defer close(st.chReceive) + logTaper := loghelper.LogarithmicTaper{} + + for { + select { + case msg := <-st.stream2.Receive(): + if msg == nil { + // stream2 was closed, so we stop the forwarding loop + return + } + + if msg, ok := msg.(InboundBinaryMessagePlain); ok { + select { + case st.chReceive <- msg.Payload: + case <-st.stream2.ctx.Done(): + return + } + } else { + logTaper.Trigger(func(newCount uint64) { + st.stream2.logger.Warn( + "Stream: dropping InboundBinaryMessage that is not InboundBinaryMessagePlain. 
Use Stream2 for support of these.", + commontypes.LogFields{}, + ) + }) + } + + case <-st.stream2.ctx.Done(): + return + } + } +} diff --git a/ragep2p/ragep2pnew/stream2.go b/ragep2p/ragep2pnew/stream2.go new file mode 100644 index 00000000..a1a58bf6 --- /dev/null +++ b/ragep2p/ragep2pnew/stream2.go @@ -0,0 +1,249 @@ +package ragep2pnew + +import ( + "context" + "fmt" + "sync" + + "github.com/smartcontractkit/libocr/commontypes" + "github.com/smartcontractkit/libocr/internal/loghelper" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/demuxer" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/internaltypes" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/muxer" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/responselimit" + "github.com/smartcontractkit/libocr/ragep2p/ragep2pnew/internal/stream2types" + "github.com/smartcontractkit/libocr/ragep2p/types" + "github.com/smartcontractkit/libocr/subprocesses" +) + +type StreamPriority = stream2types.StreamPriority + +const ( + StreamPriorityLow = stream2types.StreamPriorityLow + StreamPriorityDefault = stream2types.StreamPriorityDefault +) + +type Stream2 interface { + Other() types.PeerID + Name() string + Send(msg OutboundBinaryMessage) + Receive() <-chan InboundBinaryMessage + UpdateLimits(limits Stream2Limits) error + Close() error +} + +var _ Stream2 = &stream2{} + +// Stream2 is an over-the-network channel between two peers. Two peers may share +// multiple disjoint streams with different names. Streams are persistent and +// agnostic to the state of the connection. They abstract the underlying +// connection. Messages are delivered on a best effort basis. 
+type stream2 struct { + closedMu sync.Mutex + closed bool + + name string + other types.PeerID + streamID internaltypes.StreamID + + maxOutgoingBufferedMessages int + maxMessageLength int + + host *Host + + subprocesses subprocesses.Subprocesses + ctx context.Context + cancel context.CancelFunc + logger loghelper.LoggerWithContext + chReceive chan InboundBinaryMessage + + mux *muxer.Muxer + demux *demuxer.Demuxer + + chStreamUpdateLimitsRequest chan<- peerStreamUpdateLimitsRequest + chStreamUpdateLimitsResponse <-chan peerStreamUpdateLimitsResponse + + chStreamCloseRequest chan<- peerStreamCloseRequest + chStreamCloseResponse <-chan peerStreamCloseResponse +} + +// Other returns the peer ID of the stream's counterparty. +func (st *stream2) Other() types.PeerID { + return st.other +} + +// Name returns the name of the stream. +func (st *stream2) Name() string { + return st.name +} + +// Best effort sending of messages. May fail without returning an error. +func (st *stream2) Send(msg OutboundBinaryMessage) { + var ( + ok bool + payloadLength int + ) + switch msg := msg.(type) { + case OutboundBinaryMessagePlain: + ok = len(msg.Payload) <= st.maxMessageLength + payloadLength = len(msg.Payload) + case OutboundBinaryMessageRequest: + ok = len(msg.Payload) <= st.maxMessageLength + payloadLength = len(msg.Payload) + case OutboundBinaryMessageResponse: + // Response size is limited by the policy of the corresponding request + // and may exceed the stream's default max message length. + // Responses must never exceed the global ragep2p max message length. 
+		ok = len(msg.Payload) <= types.MaxMessageLength
+		payloadLength = len(msg.Payload)
+	default:
+		panic(fmt.Sprintf("unknown OutboundBinaryMessage type: %T", msg))
+	}
+
+	if !ok {
+		st.logger.Warn("dropping outbound message that is too large", commontypes.LogFields{
+			"messagePayloadLength":    payloadLength,
+			"streamMaxMessageLength":  st.maxMessageLength,
+			"ragep2pMaxMessageLength": types.MaxMessageLength,
+		})
+		return
+	}
+
+	_ = st.mux.PushEvict(st.streamID, msg)
+}
+
+// Best effort receiving of messages. The returned channel will be closed when
+// the stream is closed. Note that this function may return the same channel
+// across invocations.
+func (st *stream2) Receive() <-chan InboundBinaryMessage {
+	return st.chReceive
+}
+
+func (st *stream2) UpdateLimits(limits Stream2Limits) error {
+	validatedLimits, err := limits.Validate()
+	if err != nil {
+		return err
+	}
+
+	select {
+	case st.chStreamUpdateLimitsRequest <- peerStreamUpdateLimitsRequest{st.streamID, validatedLimits}:
+		resp := <-st.chStreamUpdateLimitsResponse
+		if resp.err != nil {
+			return resp.err
+		}
+		return nil
+	case <-st.ctx.Done():
+
+		return fmt.Errorf("UpdateLimits: called after Stream internal context already expired")
+	}
+}
+
+// Close the stream. This closes any channel returned by ReceiveMessages earlier.
+// After close the stream cannot be reopened. If the stream is needed in the
+// future it should be created again through NewStream2.
+// After close, any messages passed to SendMessage will be dropped.
+func (st *stream2) Close() error { + st.closedMu.Lock() + defer st.closedMu.Unlock() + host := st.host + + if st.closed { + return fmt.Errorf("already closed stream") + } + + st.logger.Info("Stream winding down", nil) + + err := func() error { + // Grab peersMu in case the peer has no streams left and we need to + // delete it + host.peersMu.Lock() + defer host.peersMu.Unlock() + + select { + case st.chStreamCloseRequest <- peerStreamCloseRequest{st.streamID}: + resp := <-st.chStreamCloseResponse + if resp.err != nil { + st.logger.Error("Unexpected error during stream Close()", commontypes.LogFields{ + "error": resp.err, + }) + return resp.err + } + if resp.peerHasNoStreams { + st.logger.Trace("Garbage collecting peer", nil) + peer := host.peers[st.other] + host.subprocesses.Go(func() { + peer.connLifeCycleMu.Lock() + defer peer.connLifeCycleMu.Unlock() + peer.connLifeCycle.connCancel() + peer.connLifeCycle.connSubs.Wait() + }) + delete(host.peers, st.other) + } + case <-st.ctx.Done(): + } + return nil + }() + if err != nil { + return err + } + + st.closed = true + st.cancel() + st.subprocesses.Wait() + close(st.chReceive) + st.logger.Info("Stream exiting", nil) + return nil +} + +func (st *stream2) receiveLoop() { + chSignalMaybePending := st.demux.SignalMaybePending(st.streamID) + chDone := st.ctx.Done() + for { + select { + case <-chSignalMaybePending: + msg, popResult := st.demux.PopMessage(st.streamID) + switch popResult { + case demuxer.PopResultEmpty: + st.logger.Debug("Demuxer buffer is empty", nil) + case demuxer.PopResultUnknownStream: + // Closing of streams does not happen in a single step, and so + // it could be that in the process of closing, the stream has + // been removed from demuxer, but receiveLoop has not stopped + // yet (but should stop soon). 
+ st.logger.Info("Demuxer does not know of the stream, it is likely we are in the process of closing the stream", nil) + case demuxer.PopResultSuccess: + if msg != nil { + select { + case st.chReceive <- msg: + case <-chDone: + } + } else { + st.logger.Error("Demuxer indicated success but we received nil msg, this should not happen", nil) + } + } + case <-chDone: + return + } + } +} + +//////////////////////////////////////////////////////// +// Types for "new" messages +//////////////////////////////////////////////////////// + +type Stream2Limits = stream2types.Stream2Limits + +type ResponsePolicy = responselimit.ResponsePolicy +type SingleUseSizedLimitedResponsePolicy = responselimit.SingleUseSizedLimitedResponsePolicy + +type RequestHandle = stream2types.RequestHandle + +type InboundBinaryMessage = stream2types.InboundBinaryMessage +type InboundBinaryMessagePlain = stream2types.InboundBinaryMessagePlain +type InboundBinaryMessageRequest = stream2types.InboundBinaryMessageRequest +type InboundBinaryMessageResponse = stream2types.InboundBinaryMessageResponse + +type OutboundBinaryMessage = stream2types.OutboundBinaryMessage +type OutboundBinaryMessagePlain = stream2types.OutboundBinaryMessagePlain +type OutboundBinaryMessageRequest = stream2types.OutboundBinaryMessageRequest +type OutboundBinaryMessageResponse = stream2types.OutboundBinaryMessageResponse diff --git a/ragep2p/ragep2pnew/tls_config.go b/ragep2p/ragep2pnew/tls_config.go new file mode 100644 index 00000000..7e9410b2 --- /dev/null +++ b/ragep2p/ragep2pnew/tls_config.go @@ -0,0 +1,22 @@ +package ragep2pnew + +import ( + "crypto/tls" + "crypto/x509" +) + +func newTLSConfig(cert tls.Certificate, verifyPeerCertificate func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error) *tls.Config { + return &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAnyClientCert, + + // Since our clients use self-signed certs, we skip verification here. 
+ // Instead, we use VerifyPeerCertificate for our own check + InsecureSkipVerify: true, + + MaxVersion: tls.VersionTLS13, + MinVersion: tls.VersionTLS13, + + VerifyPeerCertificate: verifyPeerCertificate, + } +} diff --git a/ragep2p/ragep2pnew/wrapper.go b/ragep2p/ragep2pnew/wrapper.go new file mode 100644 index 00000000..3e86e097 --- /dev/null +++ b/ragep2p/ragep2pnew/wrapper.go @@ -0,0 +1,74 @@ +package ragep2pnew + +import ( + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +func Wrapped(host *Host) ragep2pwrapper.Host { + return &hostWrapper{host} +} + +var _ ragep2pwrapper.Host = &hostWrapper{} + +type hostWrapper struct { + host *Host +} + +var _ ragep2pwrapper.Stream = &streamWrapper{} + +type streamWrapper struct { + stream *Stream +} + +func (h *hostWrapper) Start() error { + return h.host.Start() +} + +func (h *hostWrapper) Close() error { + return h.host.Close() +} + +func (h *hostWrapper) ID() types.PeerID { + return h.host.ID() +} + +func (h *hostWrapper) NewStream( + other types.PeerID, + streamName string, + maxOutgoingBufferedMessages int, + maxIncomingBufferedMessages int, + maxMessageLength int, + messagesLimit types.TokenBucketParams, + bytesLimit types.TokenBucketParams, +) (ragep2pwrapper.Stream, error) { + stream, err := h.host.NewStream(other, streamName, maxOutgoingBufferedMessages, maxIncomingBufferedMessages, maxMessageLength, messagesLimit, bytesLimit) + if err != nil { + return nil, err + } + return &streamWrapper{stream}, nil +} + +func (h *hostWrapper) RawWrappee() any { + return h.host +} + +func (s *streamWrapper) Other() types.PeerID { + return s.stream.Other() +} + +func (s *streamWrapper) Name() string { + return s.stream.Name() +} + +func (s *streamWrapper) SendMessage(data []byte) { + s.stream.SendMessage(data) +} + +func (s *streamWrapper) ReceiveMessages() <-chan []byte { + return s.stream.ReceiveMessages() +} + +func (s *streamWrapper) Close() error { + 
return s.stream.Close() +} diff --git a/ragep2p/types/types.go b/ragep2p/types/types.go index 11ed6732..8799a4d7 100644 --- a/ragep2p/types/types.go +++ b/ragep2p/types/types.go @@ -132,3 +132,16 @@ type PeerKeyring interface { // PublicKey returns the public component of the keypair used in Sign. PublicKey() PeerPublicKey } + +// TokenBucketParams contains the two parameters for a token bucket rate +// limiter. +type TokenBucketParams struct { + Rate float64 + Capacity uint32 +} + +const MaxStreamsPerPeer = 2_000 + +const MaxMessageLength = 1024 * 1024 * 1024 // 1 GiB. This must be smaller than INT32_MAX + +const MaxStreamNameLength = 256 diff --git a/ragep2p/wrapper.go b/ragep2p/wrapper.go new file mode 100644 index 00000000..c5134bd2 --- /dev/null +++ b/ragep2p/wrapper.go @@ -0,0 +1,74 @@ +package ragep2p + +import ( + "github.com/smartcontractkit/libocr/networking/ragep2pwrapper" + "github.com/smartcontractkit/libocr/ragep2p/types" +) + +func Wrapped(host *Host) ragep2pwrapper.Host { + return &hostWrapper{host} +} + +var _ ragep2pwrapper.Host = &hostWrapper{} + +type hostWrapper struct { + host *Host +} + +var _ ragep2pwrapper.Stream = &streamWrapper{} + +type streamWrapper struct { + stream *Stream +} + +func (h *hostWrapper) Start() error { + return h.host.Start() +} + +func (h *hostWrapper) Close() error { + return h.host.Close() +} + +func (h *hostWrapper) ID() types.PeerID { + return h.host.ID() +} + +func (h *hostWrapper) NewStream( + other types.PeerID, + streamName string, + outgoingBufferSize int, + incomingBufferSize int, + maxMessageLength int, + messagesLimit types.TokenBucketParams, + bytesLimit types.TokenBucketParams, +) (ragep2pwrapper.Stream, error) { + stream, err := h.host.NewStream(other, streamName, outgoingBufferSize, incomingBufferSize, maxMessageLength, messagesLimit, bytesLimit) + if err != nil { + return nil, err + } + return &streamWrapper{stream}, nil +} + +func (h *hostWrapper) RawWrappee() any { + return h.host +} + +func (s 
*streamWrapper) Other() types.PeerID { + return s.stream.Other() +} + +func (s *streamWrapper) Name() string { + return s.stream.Name() +} + +func (s *streamWrapper) SendMessage(data []byte) { + s.stream.SendMessage(data) +} + +func (s *streamWrapper) ReceiveMessages() <-chan []byte { + return s.stream.ReceiveMessages() +} + +func (s *streamWrapper) Close() error { + return s.stream.Close() +}