Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions barretenberg/cpp/bootstrap.sh
Original file line number Diff line number Diff line change
Expand Up @@ -282,8 +282,8 @@ function test_cmds_native {
while read -r test; do
local prefix=$hash
# A little extra resource for these tests.
# IPARecursiveTests and AcirHonkRecursionConstraint fail with 2 threads.
if [[ "$test" =~ ^(AcirAvmRecursionConstraint|ChonkKernelCapacity|AvmRecursiveTests|IPARecursiveTests|AcirHonkRecursionConstraint) ]]; then
# IPARecursiveTests fails with 2 threads.
if [[ "$test" =~ ^(AcirAvmRecursionConstraint|ChonkKernelCapacity|AvmRecursiveTests|IPARecursiveTests|HonkRecursionConstraintTest) ]]; then
prefix="$prefix:CPUS=4:MEM=8g"
fi
echo -e "$prefix barretenberg/cpp/scripts/run_test.sh $bin_name $test"
Expand All @@ -306,7 +306,7 @@ function test_cmds_asan {
["commitment_schemes_recursion_tests"]="IPARecursiveTests.AccumulationAndFullRecursiveVerifier"
["chonk_tests"]="ChonkTests.Basic"
["ultra_honk_tests"]="MegaHonkTests/0.Basic"
["dsl_tests"]="AcirHonkRecursionConstraint/1.TestBasicDoubleHonkRecursionConstraints"
["dsl_tests"]="HonkRecursionConstraintTestWithoutPredicate/2.Tampering"
)
for bin_name in "${!asan_tests[@]}"; do
local filter=${asan_tests[$bin_name]}
Expand Down
2 changes: 1 addition & 1 deletion barretenberg/cpp/scripts/ci_benchmark_ivc_flows.sh
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ chonk_flow $1 $2
runtime="$1"
flow_name="$(basename $2)"

if [[ "${CI:-}" == "1" ]] && [[ "${CI_ENABLE_DISK_LOGS:-0}" == "1" ]]; then
if [[ "${CI:-}" == "1" ]] && [[ "${CI_USE_BUILD_INSTANCE_KEY:-0}" == "1" ]]; then
echo_header "Uploading Barretenberg benchmark breakdowns for $flow_name"

benchmark_breakdown_file="bench-out/app-proving/$flow_name/$runtime/benchmark_breakdown.json"
Expand Down
1 change: 0 additions & 1 deletion barretenberg/cpp/src/barretenberg/api/api_chonk.test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
#include "barretenberg/chonk/private_execution_steps.hpp"
#include "barretenberg/common/serialize.hpp"
#include "barretenberg/dsl/acir_format/acir_format.hpp"
#include "barretenberg/dsl/acir_format/acir_format_mocks.hpp"
#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp"
#include "barretenberg/dsl/acir_format/serde/acir.hpp"
#include "barretenberg/dsl/acir_format/serde/witness_stack.hpp"
Expand Down
1 change: 0 additions & 1 deletion barretenberg/cpp/src/barretenberg/api/api_ultra_honk.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@
#include "barretenberg/common/map.hpp"
#include "barretenberg/common/throw_or_abort.hpp"
#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp"
#include "barretenberg/dsl/acir_format/proof_surgeon.hpp"
#include "barretenberg/dsl/acir_proofs/honk_contract.hpp"
#include "barretenberg/dsl/acir_proofs/honk_optimized_contract.hpp"
#include "barretenberg/dsl/acir_proofs/honk_zk_contract.hpp"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@
#include "barretenberg/common/serialize.hpp"
#include "barretenberg/dsl/acir_format/acir_format.hpp"
#include "barretenberg/dsl/acir_format/acir_to_constraint_buf.hpp"
#include "barretenberg/dsl/acir_format/proof_surgeon.hpp"
#include "barretenberg/flavor/ultra_flavor.hpp"
#include "barretenberg/flavor/ultra_rollup_flavor.hpp"
#include "barretenberg/ultra_honk/prover_instance.hpp"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -209,4 +209,80 @@ TEST(MegaCircuitBuilder, CompleteSelectorPartitioningCheck)
}
}

/**
 * @brief Check that the ecc_op block occupies the leading trace rows, starting at offset 1 (row 0 is the zero row).
 */
TEST(MegaCircuitBuilder, EccOpBlockIsFirstInTrace)
{
    MegaCircuitBuilder builder;

    // Queue a couple of ECC ops so the ecc_op block is non-empty.
    const auto point = g1::affine_element::random_element();
    builder.queue_ecc_add_accum(point);
    builder.queue_ecc_eq();

    // Populate a different block with a single arithmetic gate.
    const auto lhs = builder.add_variable(fr::random_element());
    const auto rhs = builder.add_variable(fr::random_element());
    const auto sum = builder.add_variable(builder.get_variable(lhs) + builder.get_variable(rhs));
    builder.create_add_gate({ lhs, rhs, sum, 1, 1, -1, 0 });

    builder.finalize_circuit(true);
    builder.blocks.compute_offsets();

    // The ecc_op block must begin immediately after the zero row.
    EXPECT_EQ(builder.blocks.ecc_op.trace_offset(), 1);

    // Every other populated block must begin at or after the end of the ecc_op block.
    const size_t ecc_op_end = builder.blocks.ecc_op.trace_offset() + builder.blocks.ecc_op.size();
    for (auto& block : builder.blocks.get()) {
        if (&block == &builder.blocks.ecc_op || block.size() == 0) {
            continue; // skip the ecc_op block itself and any empty block
        }
        EXPECT_GE(block.trace_offset(), ecc_op_end) << "Block starts before ecc_op ends";
    }

    EXPECT_TRUE(CircuitChecker::check(builder));
}

/**
 * @brief Verify that an empty circuit can be finalized and passes circuit checks
 * @details Finalization is expected to add gates so that all required polynomials are non-zero.
 * @note This is a "completeness" test; unlikely to be a use-case.
 */
TEST(MegaCircuitBuilder, EmptyCircuitFinalization)
{
    MegaCircuitBuilder builder;

    // Nothing has been added yet: the ecc_op block starts out empty.
    EXPECT_EQ(builder.blocks.ecc_op.size(), 0);

    builder.finalize_circuit(true);

    // Finalization should have populated the structures backing the required polynomials.
    EXPECT_GT(builder.blocks.ecc_op.size(), 0) << "Finalization should add ECC ops for non-zero polynomials";
    EXPECT_GT(builder.get_calldata().size(), 0) << "Finalization should add databus entries";
    EXPECT_GT(builder.get_secondary_calldata().size(), 0);
    EXPECT_GT(builder.get_return_data().size(), 0);

    EXPECT_TRUE(CircuitChecker::check(builder));
}

/**
 * @brief Verify that a databus read with an out-of-bounds index is caught
 */
TEST(MegaCircuitBuilder, DatabusOutOfBoundsReadFails)
{
    MegaCircuitBuilder builder;

    // Populate calldata with exactly one entry, so only index 0 is valid.
    const auto entry = builder.add_variable(fr(42));
    builder.add_public_calldata(entry);

    // Index 1 is past the end of the single-entry calldata.
    const auto oob_index = builder.add_variable(fr(1));

    // read_calldata is expected to reject the out-of-bounds access.
    EXPECT_THROW(builder.read_calldata(oob_index), std::runtime_error);
}

} // namespace bb
Original file line number Diff line number Diff line change
Expand Up @@ -40,42 +40,22 @@ Where:

| Method | Description |
|--------|-------------|
| `update()` | Updates the internal state of the verifier given a linear combination and the inverse of the vanishing eval |
| `reduce_verification()` | Static method that processes all claims and returns an `OpeningClaim` |
| `reduce_verification_no_finalize()` | Static method that processes claims and returns a verifier instance for further operations |
| `finalize()` | Executes the MSM and returns an `OpeningClaim` |
| `export_batch_opening_claim()` | Exports `BatchOpeningClaim` without executing MSM (allows combining with KZG's $ [W] $) |

**Usage Pattern:**
```cpp
// 1. Initialize verifier with commitments
ShplonkVerifier_<Curve> verifier(polynomial_commitments, transcript, num_claims);
// Simple usage - processes all claims and returns result
OpeningClaim<Curve> result = ShplonkVerifier::reduce_verification(
g1_identity, claims, transcript);

// 2. Accumulate claims (updates scalars internally)
for (auto& claim : claims) {
verifier.update(claim, inverse_vanishing_eval);
}

// 3a. Finalize with MSM execution
OpeningClaim<Curve> result = verifier.finalize(g1_identity);

// 3b. OR export for deferred MSM (e.g., to combine with KZG)
// Or for deferred MSM (e.g., to combine with KZG)
auto verifier = ShplonkVerifier::reduce_verification_no_finalize(claims, transcript);
BatchOpeningClaim<Curve> batch_claim = verifier.export_batch_opening_claim(g1_identity);
```

### Handling Linear Combinations

When polynomials share commitments (e.g., $ p_2 = a \cdot p_1 $), Shplonk avoids redundant MSM entries by accumulating scalars:

Instead of computing:
$$ [Q] - \frac{1}{z - x_1}[p_1] - \frac{\nu}{z - x_2}[p_2] + \ldots $$

We compute:
$$ [Q] - \left(\frac{1}{z - x_1} + \frac{a\nu}{z - x_2}\right)[p_1] + \ldots $$

This is achieved via the `LinearCombinationOfClaims` structure which stores:
- `indices`: which base commitments are involved
- `scalars`: the coefficients in the linear combination
- `opening_pair`: the evaluation point and claimed value

## Shplemini

Shplemini combines Gemini and Shplonk into a single protocol, providing:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -348,7 +348,6 @@ template <typename Curve> class ShplonkVerifier_ {

// Random challenges
std::vector<Fr> pows_of_nu;
size_t pow_idx = 0;
// Commitment to quotient polynomial
Commitment quotient;
// Partial evaluation challenge
Expand Down Expand Up @@ -393,59 +392,6 @@ template <typename Curve> class ShplonkVerifier_ {
}
}

/**
 * @brief Claim used to update the internal state of the Shplonk verifier, expressed as a linear combination of the
 * commitments stored by the verifier. It is composed of:
 * - A list of indices = \f$(i_1, \dots, i_k)\f$
 * - A list of scalar coefficients = \f$(a_1, \dots, a_k)\f$
 * - An opening pair \f$(x, v)\f$
 * The state of the Shplonk verifier is updated so as to add the check:
 * \f[ \sum_{j=1}^k a_j f_{i_j}(x) = v \f]
 * where \f${f_i}_i\f$ are the polynomials whose commitments are stored in the Shplonk verifier.
 *
 * @note The challenge \f$x\f$ is stored redundantly for the purpose of the `update` method, but it is useful to
 * expose the method `reduce_verification_vector_claims_no_finalize`
 */
struct LinearCombinationOfClaims {
std::vector<size_t> indices;       // which base commitments participate in the combination
std::vector<Fr> scalars;           // coefficients of the combination, parallel to `indices`
OpeningPair<Curve> opening_pair;   // the evaluation challenge x and claimed value v
};

/**
 * @brief Update the internal state of the Shplonk verifier
 *
 * @details Given a list of indices = \f$(i_1, \dots, i_k)\f$, a list of scalar coefficients = \f$(a_1, \dots,
 * a_k)\f$, an opening pair \f$(x, v)\f$, and the inverse vanishing eval \f$\frac{1}{z - x}\f$, update the internal
 * state of the Shplonk verifier so as to add the check \f[ \sum_{j=1}^k a_j f_{i_j}(x) = v \f] This amounts to
 * updating:
 * - \f$s_{i_j} -= \frac{\nu^{i-1} * a_j}{z - x}\f$
 * - \f$\theta += \nu^{i-1} \frac{v}{z - x}\f$
 *
 * @param update_data indices, coefficients, and opening pair describing the linear-combination claim
 * @param inverse_vanishing_eval precomputed \f$\frac{1}{z - x}\f$ for this claim's challenge \f$x\f$
 */
void update(const LinearCombinationOfClaims& update_data, const Fr& inverse_vanishing_eval)
{
BB_ASSERT_LT(pow_idx, pows_of_nu.size(), "Shplonk verifier: pow_idx out of bounds");
// Compute \nu^{i-1} / (z - x)
auto scalar_factor = pows_of_nu[pow_idx] * inverse_vanishing_eval;

for (const auto& [index, coefficient] : zip_view(update_data.indices, update_data.scalars)) {
// \nu^{i-1} * a_j / (z - x)
auto scaling_factor = scalar_factor * coefficient;
// s_{i_j} -= \nu^{i-1} * a_j / (z - x); slot 0 is reserved (commitment scalars are offset by one)
BB_ASSERT_LT(index + 1, scalars.size(), "Shplonk verifier: index out of bounds");
scalars[index + 1] -= scaling_factor;
}

// \theta += \nu^{i-1} * v / (z - x)
identity_scalar_coefficient += scalar_factor * update_data.opening_pair.evaluation;

// Advance `pow_idx` so the next claim consumes the next power of \nu
pow_idx += 1;
}

/**
* @brief Finalize the Shplonk verification and return the KZG opening claim
*
Expand Down Expand Up @@ -525,61 +471,20 @@ template <typename Curve> class ShplonkVerifier_ {
Fr::batch_invert(inverse_vanishing_evals);
}

// Update the Shplonk verifier state with each claim
// For each claim: s_i -= ν^i / (z - x_i) and θ += ν^i * v_i / (z - x_i)
for (size_t idx = 0; idx < claims.size(); idx++) {
verifier.update({ { idx }, { Fr(1) }, claims[idx].opening_pair }, inverse_vanishing_evals[idx]);
// Compute ν^i / (z - x_i)
auto scalar_factor = verifier.pows_of_nu[idx] * inverse_vanishing_evals[idx];
// s_i -= ν^i / (z - x_i)
verifier.scalars[idx + 1] -= scalar_factor;
// θ += ν^i * v_i / (z - x_i)
verifier.identity_scalar_coefficient += scalar_factor * claims[idx].opening_pair.evaluation;
}

return verifier;
};

/**
 * @brief Update this verifier's state with the provided claims, without finalizing (no MSM is executed).
 *
 * @param claims List of LinearCombinationOfClaims \f$\{ ( (i_{j_1}, \dots, i_{j_k}), (a_{j_1}, \dots, a_{j_k}),
 * (r_k, v_k) )
 * \}_k\f$ s.t. \f[ \sum_{l=1}^k a_{j_l} f_{j_l}(r_k) = v_k \f] where \f$f_1, \dots, f_m\f$ are the polynomials
 * whose commitments are held by the Shplonk verifier.
 */
void reduce_verification_vector_claims_no_finalize(std::span<const LinearCombinationOfClaims> claims)
{
const size_t num_claims = claims.size();

// Compute { 1 / (z - x_i) }
std::vector<Fr> inverse_vanishing_evals;
inverse_vanishing_evals.reserve(num_claims);
if constexpr (Curve::is_stdlib_type) {
// In-circuit: invert each element individually (no batch inversion gadget used here)
for (const auto& claim : claims) {
inverse_vanishing_evals.emplace_back((this->z_challenge - claim.opening_pair.challenge).invert());
}
} else {
// Native: collect the (z - x_i) values, then invert them all with a single batch inversion
for (const auto& claim : claims) {
inverse_vanishing_evals.emplace_back(this->z_challenge - claim.opening_pair.challenge);
}
Fr::batch_invert(inverse_vanishing_evals);
}

// Fold each claim into the verifier's accumulated scalars
for (const auto& [claim, inv] : zip_view(claims, inverse_vanishing_evals)) {
this->update(claim, inv);
}
}

/**
 * @brief Recomputes the new claim commitment [G] given the proof and
 * the challenge r. No verification happens so this function always succeeds.
 *
 * @details Processes the claims via `reduce_verification_vector_claims_no_finalize`, then executes the MSM via
 * `finalize` to produce the opening claim.
 *
 * @param g1_identity the identity element for the Curve
 * @param claims List of LinearCombinationOfClaims \f$\{ ( (i_{j_1}, \dots, i_{j_k}), (a_{j_1}, \dots, a_{j_k}),
 * (r_k, v_k) )
 * \}_k\f$ s.t. \f[ \sum_{l=1}^k a_{j_l} f_{j_l}(r_k) = v_k \f] where \f$f_1, \dots, f_m\f$ are the polynomials
 * whose commitments are held by the Shplonk verifier.
 * @return the single OpeningClaim resulting from batching all input claims
 */
OpeningClaim<Curve> reduce_verification_vector_claims(Commitment g1_identity,
std::span<const LinearCombinationOfClaims> claims)
{
this->reduce_verification_vector_claims_no_finalize(claims);
return this->finalize(g1_identity);
};

/**
* @brief Recomputes the new claim commitment [G] given the proof and
* the challenge r. No verification happens so this function always succeeds.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,48 +46,6 @@ TYPED_TEST(ShplonkTest, ShplonkSimple)
this->verify_opening_claim(batched_verifier_claim, batched_opening_claim.polynomial);
}

// Test of Shplonk prover/verifier for polynomials that are linearly dependent
TYPED_TEST(ShplonkTest, ShplonkLinearlyDependent)
{
using ClaimData = UnivariateClaimData<TypeParam>;
using ShplonkProver = ShplonkProver_<TypeParam>;
using ShplonkVerifier = ShplonkVerifier_<TypeParam>;
using Fr = typename TypeParam::ScalarField;

auto prover_transcript = NativeTranscript::prover_init_empty();

// Generate two random (unrelated) polynomials of two different sizes and a random linear combinations
auto setup = this->generate_claim_data({ MAX_POLY_DEGREE, MAX_POLY_DEGREE / 2 });

// Extract the commitments to be used in the Shplonk verifier
auto commitments = ClaimData::polynomial_commitments(setup);

// Linearly combine the polynomials and evaluations
auto [coefficients, evals] = this->combine_claims(setup);

// Execute the shplonk prover functionality
auto prover_opening_claims = ClaimData::prover_opening_claims(setup);
const auto batched_opening_claim = ShplonkProver::prove(this->ck(), prover_opening_claims, prover_transcript);
// An intermediate check to confirm the opening of the shplonk prover witness Q
this->verify_opening_pair(batched_opening_claim.opening_pair, batched_opening_claim.polynomial);

// Shplonk verification
auto verifier_opening_claims = ClaimData::verifier_opening_claims(setup);
std::vector<typename ShplonkVerifier::LinearCombinationOfClaims> update_data = {
{ { 0 }, { Fr(1) }, verifier_opening_claims[0].opening_pair },
{ { 1 }, { Fr(1) }, verifier_opening_claims[1].opening_pair },
{ { 0, 1 }, coefficients, verifier_opening_claims[2].opening_pair },
};
auto verifier_transcript = NativeTranscript::verifier_init_empty(prover_transcript);
ShplonkVerifier verifier(commitments, verifier_transcript, verifier_opening_claims.size());

// Execute the shplonk verifier functionality
const auto batched_verifier_claim =
verifier.reduce_verification_vector_claims(this->vk().get_g1_identity(), update_data);

this->verify_opening_claim(batched_verifier_claim, batched_opening_claim.polynomial);
}

// Test exporting batch claim from Shplonk verifier and verification
TYPED_TEST(ShplonkTest, ExportBatchClaimAndVerify)
{
Expand Down
Loading
Loading