1 change: 1 addition & 0 deletions cspell.json
@@ -157,6 +157,7 @@
"ierc",
"IGSE",
"incentivized",
"incrementation",
"indexeddb",
"interruptible",
"IPFS",
@@ -4,7 +4,7 @@ use crate::{
FinalBlobBatchingChallenges,
},
blob::barycentric_evaluate_blob_at_z,
utils::{compress_to_blob_commitment, validate_canonical_point_at_infinity},
utils::{compress_to_blob_commitment, validate_canonical_representation_if_infinity},
};
use bigcurve::BigCurve;
use bignum::BLS12_381_Fr;
@@ -42,7 +42,7 @@ fn compute_blob_challenge(
challenge
}

/// Evaluates each blob required for an L1 block:
/// Evaluates each blob required for an L1 checkpoint:
A Contributor suggested this change:
/// Evaluates each blob required for an L1 checkpoint:
/// Evaluates each blob required for a checkpoint:

/// - Compresses each of the blob's injected commitments.
/// - Evaluates each blob individually to find its challenge `z_i` and evaluation `y_i`.
/// - Updates the batched blob accumulator.
@@ -54,21 +54,24 @@ pub fn evaluate_blobs_and_batch<let NumBlobs: u32>(
final_blob_challenges: FinalBlobBatchingChallenges,
start_accumulator: BlobAccumulator,
) -> BlobAccumulator {
// The rationale behind this is that the start accumulator is hinted, and in merge we just assert that left.end.eq(right.start)
// The rationale behind this line is that the start accumulator is hinted, and in merge we just assert that left.end.eq(right.start)
// And on root we check that left.start.eq(empty()).
// However, currently the equals method of big curve points doesn't check for equality of the limbs on infinity points.
// This means that infinity points can be swapped with non-canonical versions without the eq realizing it.
// I don't think there is a way to take advantage of that, since we should never have infinity points in c_acc,
// but for good measure we'll validate that it's canonical here.
validate_canonical_point_at_infinity(start_accumulator.c_acc);
// Also, the Root Rollup circuit technically validates that the start accumulator of the checkpoint is Empty,
// and the Empty impl uses the canonical representation of infinity.
validate_canonical_representation_if_infinity(start_accumulator.c_acc);

let mut end_accumulator = start_accumulator;
for i in 0..NumBlobs {
let single_blob_fields = subarray(blobs_as_fields, i * FIELDS_PER_BLOB);
let commitment_point = kzg_commitments_points[i];
// We need to validate that the point is on the curve and canonical (0,0 if infinity), since it's hinted in this verification scheme
commitment_point.validate_on_curve();
validate_canonical_point_at_infinity(commitment_point);
// Strangely, `validate_on_curve` doesn't validate the point at infinity, so we check that here:
validate_canonical_representation_if_infinity(commitment_point);
let c_i = compress_to_blob_commitment(commitment_point);

// Note that with multiple blobs per block, each blob uses the same blob_fields_hash in z_i.
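As background for the evaluation step above (a sketch of standard EIP-4844-style barycentric evaluation, not code from this PR): each blob's FIELDS_PER_BLOB fields d_i are treated as evaluations of a polynomial over the N-th roots of unity ω^i, and `barycentric_evaluate_blob_at_z` returns y_i = p(z_i) for a challenge z_i assumed to lie outside that domain:

p(z) = \frac{z^{N} - 1}{N} \sum_{i=0}^{N-1} d_i \cdot \frac{\omega^{i}}{z - \omega^{i}}, \qquad y_i = p(z_i), \qquad N = \mathrm{FIELDS\_PER\_BLOB}

The compressed commitment c_i, challenge z_i, and evaluation y_i for each blob are what the accumulator batches, so that (per the batching design referenced in this PR) a single evaluation check can later cover all blobs.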
@@ -4,4 +4,4 @@ mod validate_point;

pub use compress_to_blob_commitment::compress_to_blob_commitment;
pub use validate_final_blob_batching_challenges::validate_final_blob_batching_challenges;
pub use validate_point::validate_canonical_point_at_infinity;
pub use validate_point::validate_canonical_representation_if_infinity;
@@ -3,7 +3,7 @@ use bigcurve::BigCurve;
use bignum::{BigNum, fields::bls12_381Fq::BLS12_381_Fq};

/// Validates that if the point is at infinity, it has the canonical x and y coordinates.
pub fn validate_canonical_point_at_infinity(point: BLSPoint) {
pub fn validate_canonical_representation_if_infinity(point: BLSPoint) {
if point.is_infinity() {
let canonical_point_at_infinity = BLSPoint::point_at_infinity();
assert_eq(point.x, canonical_point_at_infinity.x, "Non canonical x coordinate at infinity");
@@ -14,21 +14,21 @@ pub fn validate_canonical_point_at_infinity(point: BLSPoint) {
#[test]
fn should_accept_canonical_point_at_infinity() {
let point = BLSPoint { x: BLS12_381_Fq::zero(), y: BLS12_381_Fq::zero(), is_infinity: true };
validate_canonical_point_at_infinity(point);
validate_canonical_representation_if_infinity(point);
}

#[test(should_fail_with = "Non canonical x coordinate at infinity")]
fn should_fail_if_non_canonical_x_coordinate_point_at_infinity() {
let x = BLS12_381_Fq::one();
let y = BLS12_381_Fq::zero();
let point = BLSPoint { x, y, is_infinity: true };
validate_canonical_point_at_infinity(point);
validate_canonical_representation_if_infinity(point);
}

#[test(should_fail_with = "Non canonical y coordinate at infinity")]
fn should_fail_if_non_canonical_y_coordinate_point_at_infinity() {
let x = BLS12_381_Fq::zero();
let y = BLS12_381_Fq::one();
let point = BLSPoint { x, y, is_infinity: true };
validate_canonical_point_at_infinity(point);
validate_canonical_representation_if_infinity(point);
}
@@ -35,12 +35,16 @@ fn assert_prev_block_rollups_follow_on_from_each_other(
"Mismatched sponge blobs: expected right.start_sponge_blob to match left.end_sponge_blob",
);

// Notice: consecutive blocks within a checkpoint can have the same timestamp.
// I.e. the `<=` is _intentional_.
assert(
left.end_timestamp <= right.start_timestamp,
"Rollup block timestamps do not follow on from each other",
);

// Non-empty `in_hash` originates from the first block root and must propagate through all merge steps via the left
// rollup only. Which means the right rollup must not carry or propagate the `in_hash`.
// Non-empty `in_hash` originates from the first block root and is propagated through all merge
// steps via the left rollup only (see merge_block_rollups.nr).
// We prevent the right rollup from propagating a nonzero `in_hash`, so it's impossible for any
// block but the first to propagate a nonzero `in_hash` up to the checkpoint root.
assert_eq(right.in_hash, 0, "Right rollup must not carry in_hash");
}
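A standalone sketch of the timestamp rule above (hypothetical values and a u64 timestamp are assumed; this is not code from the PR). Within a checkpoint the `<=` deliberately allows consecutive blocks to share a timestamp; across checkpoints, the Checkpoint Root circuit further down in this diff enforces a strict `>` against the previous block header.

fn timestamps_follow_on(left_end_timestamp: u64, right_start_timestamp: u64) {
    // Within a checkpoint, equal timestamps are allowed: `<=`, not `<`.
    assert(left_end_timestamp <= right_start_timestamp);
}

#[test]
fn consecutive_blocks_may_share_a_timestamp() {
    timestamps_follow_on(100, 100); // allowed within a checkpoint
    timestamps_follow_on(100, 101); // also allowed
}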
@@ -203,6 +203,11 @@ impl BlockRollupPublicInputsComposer {
// Absorb data for this block into the end sponge blob.
let mut block_end_sponge_blob = self.end_sponge_blob;
// `in_hash` is not 0 if and only if it's the first block in the checkpoint.
// Note: the `in_hash` of the first block in the checkpoint is asserted to be nonzero
// by the Checkpoint Root circuit.
// Note: the `in_hash` of subsequent blocks in the checkpoint is asserted to be 0
// within `validate_consecutive_block_rollups.nr`, by asserting that the `in_hash` of all
// "right" rollups is `0`.
let is_first_block_in_checkpoint = self.in_hash != 0;
block_end_sponge_blob.absorb_block_end_data(
global_variables,
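A standalone sketch of the `in_hash` flow described in the comments above (hypothetical helper and values; the real logic lives in merge_block_rollups.nr and validate_consecutive_block_rollups.nr): each merge keeps the left rollup's `in_hash` and insists the right one is zero, so only the first block's value can survive to the checkpoint root, where it is then required to be nonzero.

fn propagate_in_hash(left_in_hash: Field, right_in_hash: Field) -> Field {
    // Only the left rollup may carry a nonzero in_hash up the merge tree.
    assert_eq(right_in_hash, 0, "Right rollup must not carry in_hash");
    left_in_hash
}

#[test]
fn only_the_first_block_propagates_in_hash() {
    // Block 0 sets a nonzero in_hash; blocks 1 and 2 carry 0.
    let merged = propagate_in_hash(propagate_in_hash(123, 0), 0);
    // The Checkpoint Root circuit then asserts the surviving value is nonzero.
    assert(merged != 0);
}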
@@ -20,6 +20,7 @@ use types::{

pub struct CheckpointRollupPublicInputsComposer<let NumBlobs: u32> {
merged_rollup: BlockRollupPublicInputs,
// The below are all hints:
start_blob_accumulator: BlobAccumulator,
final_blob_challenges: FinalBlobBatchingChallenges,
blobs_fields: [Field; FIELDS_PER_BLOB * NumBlobs],
@@ -30,6 +31,7 @@ pub struct CheckpointRollupPublicInputsComposer<let NumBlobs: u32> {
impl<let NumBlobs: u32> CheckpointRollupPublicInputsComposer<NumBlobs> {
pub fn new<let NumPreviousRollups: u32>(
previous_rollups: [BlockRollupPublicInputs; NumPreviousRollups],
// The below args are all hints:
start_blob_accumulator: BlobAccumulator,
final_blob_challenges: FinalBlobBatchingChallenges,
blobs_fields: [Field; NumBlobs * FIELDS_PER_BLOB],
@@ -114,12 +116,26 @@ impl<let NumBlobs: u32> CheckpointRollupPublicInputsComposer<NumBlobs> {
end_sponge_blob.absorb_checkpoint_end_marker();

let num_blob_fields = end_sponge_blob.num_absorbed_fields;
// Check that the first `num_blob_fields` of the given fields do match what's been absorbed into the sponge.
let mut expected_end_sponge = SpongeBlob::init();
expected_end_sponge.absorb(self.blobs_fields, num_blob_fields);

let mut expected_end_sponge_blob = SpongeBlob::init();

// Check that the first `num_blob_fields` of the hinted `self.blobs_fields` do match what's
// been absorbed into the sponge.
// Elaboration:
// We have an `end_sponge_blob`, which is the result of incrementally absorbing data to the sponge
// over many circuits (Tx Base circuits, Block Root circuits, and this Checkpoint Root circuit).
// We need to give this Checkpoint Root circuit access to _all_ of that underlying data, flattened,
// because later in this circuit we'll be interpolating that data into polynomials (one polynomial
// per blob that we've used up).
// The flattened, underlying data is hinted to this circuit as `self.blobs_fields`.
// To ensure `self.blobs_fields` actually matches the data that all of the previous circuits saw,
// we repeat the entire sponge computation -- the one that was previously split across many
// circuits -- entirely within this circuit in the line below.
// We hope that `expected_end_sponge_blob == end_sponge_blob`.
expected_end_sponge_blob.absorb(self.blobs_fields, num_blob_fields);
assert_eq(
end_sponge_blob,
expected_end_sponge,
expected_end_sponge_blob,
"Provided blob fields do not match the fields that were absorbed",
);
// Check that all fields after `num_blob_fields` are zero.
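A sketch of the property the elaboration above relies on (illustrative field values; it assumes SpongeBlob is in scope as in this composer and that `absorb` is purely incremental, with no per-call padding or squeezing): absorbing the same fields piecewise, as the Tx Base and Block Root circuits do, must yield the same sponge state as absorbing them in one go from the hinted, flattened blob fields.

#[test]
fn incremental_and_batched_absorption_agree() {
    // Built up across several circuits: two separate absorb calls...
    let mut incremental_sponge = SpongeBlob::init();
    incremental_sponge.absorb([1, 2], 2);
    incremental_sponge.absorb([3, 4], 2);

    // ...versus recomputed in one shot inside the Checkpoint Root circuit.
    let mut batched_sponge = SpongeBlob::init();
    batched_sponge.absorb([1, 2, 3, 4], 4);

    assert_eq(incremental_sponge, batched_sponge);
}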
@@ -78,6 +78,9 @@ impl<let NumPreviousRollups: u32, let NumVkIndices: u32> CheckpointRootInputsVal
"The start state of the checkpoint does not match the state in the previous block header",
);

// Notice: unlike blocks within the same checkpoint (which can share the same timestamp),
// the blocks of _different_ checkpoints must have strictly increasing timestamps.
// I.e. the `>` here is _intentional_.
assert(
first_rollup.start_timestamp > self.previous_block_header.global_variables.timestamp,
"The start timestamp must be after the previous block's timestamp",
@@ -87,6 +90,7 @@
// In `validate_consecutive_block_rollups`, it ensures that the hash only propagates through all merge steps
// exclusively via the left rollup. The value in the left rollup must not be 0 when it reaches the checkpoint
// root (this point).
// Q: what if the L1 subtree being copied to L2 is empty? What is the value in this case?
A Contributor replied: The value will be the root of an empty tree, i.e. all messages/leaves are 0s.

assert(
first_rollup.in_hash != 0,
"in_hash must be set via the first block root in the checkpoint",
@@ -118,15 +122,13 @@ impl<let NumPreviousRollups: u32, let NumVkIndices: u32> CheckpointRootInputsVal
fn validate_end_states(self) {
let end_sponge_blob =
self.previous_rollups[NumPreviousRollups - 1].public_inputs.end_sponge_blob;

// Check that the number of absorbed blob fields is not larger than the maximum number of fields allowed.
// Note: It must not equal the max allowed number because an additional checkpoint end marker will be absorbed
// in `checkpoint_rollup_public_inputs_composer`.
assert(
end_sponge_blob.num_absorbed_fields < FIELDS_PER_BLOB * BLOBS_PER_CHECKPOINT,
"Attempted to overfill blobs",
);

// The `end_sponge_blob` is validated against the injected blob fields in
// `checkpoint_rollup_public_inputs_composer.nr`, after the checkpoint end marker is absorbed.
}
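A small arithmetic check of the strict bound above (FIELDS_PER_BLOB = 4096 follows EIP-4844; the BLOBS_PER_CHECKPOINT value of 3 is assumed purely for illustration): using `<` rather than `<=` guarantees at least one field of headroom for the checkpoint end marker absorbed later in checkpoint_rollup_public_inputs_composer.nr.

#[test]
fn strict_bound_leaves_room_for_the_end_marker() {
    let fields_per_blob: u32 = 4096; // per EIP-4844
    let blobs_per_checkpoint: u32 = 3; // illustrative value only
    let capacity = fields_per_blob * blobs_per_checkpoint;

    // The strict `<` means at most capacity - 1 fields have been absorbed so far...
    let max_absorbed = capacity - 1;
    // ...so absorbing the single checkpoint end marker still fits within the blobs.
    assert(max_absorbed + 1 <= capacity);
}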
@@ -76,7 +76,7 @@ impl RootRollupPrivateInputs {
// Below we check:
// 1. The first accumulator of the entire epoch (left.start_blob_accumulator) is empty.
merged.start_blob_accumulator.assert_empty("Epoch did not start with empty blob state.");
// 2. The `final_blob_challenges` matches the final state of the accumulator.
// 2. The claimed `final_blob_challenges` match the final state of the accumulator.
validate_final_blob_batching_challenges(
merged.end_blob_accumulator,
merged.final_blob_challenges,
@@ -18,13 +18,14 @@ use dep::types::{
utils::arrays::array_padded_with,
};

/// Validate the tx constants against the block constants.
/// Validate some of the tx constants that were propagated from private-land (from the private_tail's public inputs)
/// against the corresponding items of this circuit's block constants.
/// - For private-only txs, the block constants are provided via private inputs.
/// - For public-inclusive txs, some values are copied over from the AVM proof's public inputs. This function indirectly
/// checks that the values in `private_tail` match those from the AVM.
/// - For public-inclusive txs, some values are copied over from the AVM proof's public inputs. This function
/// checks that the relevant values in `private_tail` match those from the AVM.
pub fn validate_tx_constant_data(
tx_constants: TxConstantData,
block_constants: BlockConstantData,
block_constants: BlockConstantData, // where applicable, these constants have been copied over from the avm's public inputs.
anchor_block_archive_sibling_path: [Field; ARCHIVE_HEIGHT],
) {
// Values from `tx_constants` to be checked in this function:
@@ -35,11 +36,13 @@ pub fn validate_tx_constant_data(
let tx_vk_tree_root = tx_constants.vk_tree_root;
let tx_protocol_contracts_hash = tx_constants.protocol_contracts_hash;

// Values from `block_constants` to be checked against those from `tx_constants`:
// Values from `block_constants` to be checked against those from `tx_constants`.
// (When executing this fn as part of the public tx base, we comment when an item has been copied over
// from the avm's public inputs).
let block_last_archive_root = block_constants.last_archive.root;
let block_chain_id = block_constants.global_variables.chain_id;
let block_version = block_constants.global_variables.version;
let block_gas_fees = block_constants.global_variables.gas_fees;
let block_chain_id = block_constants.global_variables.chain_id; // from avm
let block_version = block_constants.global_variables.version; // from avm
let block_gas_fees = block_constants.global_variables.gas_fees; // from avm
let block_vk_tree_root = block_constants.vk_tree_root;
let block_protocol_contracts_hash = block_constants.protocol_contracts_hash;

@@ -75,14 +78,17 @@ pub fn validate_tx_constant_data(
"Mismatched vk_tree_root between kernel and rollup",
);

// This assertion is not strictly necessary for public tx base, since the value in `block_constants` is copied over
// from `tx_constants`. However, using this single function for both private and public tx bases is cleaner.
assert_eq(
tx_protocol_contracts_hash,
block_protocol_contracts_hash,
"Mismatched protocol_contracts_hash between kernel and rollup",
);

// Ensure that the `max_fees_per_gas` specified by the tx is greater than or equal to the `gas_fees` for the block.
// Note that for private only txs, the gas against the limit is checked on tail, since it doesn't have public execution.
// Note that for private-only txs, the gas against the limit is checked in the private-kernel-tail,
// since it doesn't have public execution.
assert(
tx_gas_settings.max_fees_per_gas.fee_per_da_gas >= block_gas_fees.fee_per_da_gas,
"da gas is higher than the maximum specified by the tx",
@@ -24,7 +24,9 @@ pub fn build_tx_effect(
) -> (TxEffect, TxEffectArrayLengths) {
let accumulated_data = private_to_rollup.end;

// Silo L2 to L1 messages.
// Compute siloed L2-to-L1 message hashes (each message is siloed with the address of the contract
// that emitted it). We do this sequencer-side, because the hashes are computed with sha256
// for EVM compatibility.
let l2_to_l1_msgs = accumulated_data.l2_to_l1_msgs.map(|message| silo_l2_to_l1_message(
message,
private_to_rollup.constants.tx_context.version,
@@ -48,7 +50,7 @@
[PublicDataWrite::empty(); MAX_TOTAL_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX];
public_data_writes[0] = fee_payer_balance_write;

// Construct contract class logs from the hashes from kernel circuits and the log fields given via private inputs.
// Construct ContractClassLogs from the hashes from kernel circuits and the log fields given via private inputs.
let contract_class_logs = accumulated_data.contract_class_logs_hashes.mapi(|i, log_hash| {
ContractClassLog {
log: Log::new(contract_class_log_fields[i], log_hash.inner.length),
@@ -83,7 +85,7 @@
note_hashes: array_length(accumulated_data.note_hashes),
nullifiers: array_length(accumulated_data.nullifiers),
l2_to_l1_msgs: array_length(l2_to_l1_msgs),
public_data_writes: 1,
public_data_writes: 1, // No public functions were called. This is the fee_payer's FeeJuice balance decrementation.
private_logs: private_logs_array_length,
contract_class_logs: contract_class_logs_array_length,
};
@@ -84,7 +84,7 @@ impl PublicTxBaseInputsValidator {

private_tail_validator::validate_tx_constant_data(
private_tail.constants,
self.constants,
self.constants, // where applicable, these constants have been copied over from the avm's public inputs.
self.anchor_block_archive_sibling_path,
);
