diff --git a/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts b/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts index 26781f241ec7..5ecbc10e4505 100644 --- a/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts +++ b/yarn-project/end-to-end/src/e2e_pending_note_hashes_contract.test.ts @@ -287,6 +287,8 @@ describe('e2e_pending_note_hashes_contract', () => { }); it('Should handle overflowing the kernel data structures in nested calls', async () => { + // This test verifies that a transaction can emit more notes than MAX_NOTE_HASHES_PER_TX without failing, since + // the notes are nullified and will be squashed by the kernel reset circuit. const sender = owner; const notesPerIteration = Math.min(MAX_NOTE_HASHES_PER_CALL, MAX_NOTE_HASH_READ_REQUESTS_PER_CALL); const minToNeedReset = Math.min(MAX_NOTE_HASHES_PER_TX, MAX_NOTE_HASH_READ_REQUESTS_PER_TX) + 1; diff --git a/yarn-project/pxe/src/block_synchronizer/block_synchronizer.test.ts b/yarn-project/pxe/src/block_synchronizer/block_synchronizer.test.ts index 762839fb5f92..ba68a92e6bc9 100644 --- a/yarn-project/pxe/src/block_synchronizer/block_synchronizer.test.ts +++ b/yarn-project/pxe/src/block_synchronizer/block_synchronizer.test.ts @@ -11,7 +11,7 @@ import { type MockProxy, mock } from 'jest-mock-extended'; import { AnchorBlockDataProvider } from '../storage/anchor_block_data_provider/anchor_block_data_provider.js'; import { NoteDataProvider } from '../storage/note_data_provider/note_data_provider.js'; -import { TaggingDataProvider } from '../storage/tagging_data_provider/tagging_data_provider.js'; +import { RecipientTaggingDataProvider } from '../storage/tagging_data_provider/recipient_tagging_data_provider.js'; import { BlockSynchronizer } from './block_synchronizer.js'; describe('BlockSynchronizer', () => { @@ -19,7 +19,7 @@ describe('BlockSynchronizer', () => { let tipsStore: L2TipsKVStore; let anchorBlockDataProvider: AnchorBlockDataProvider; let 
noteDataProvider: NoteDataProvider; - let taggingDataProvider: TaggingDataProvider; + let recipientTaggingDataProvider: RecipientTaggingDataProvider; let aztecNode: MockProxy; let blockStream: MockProxy; @@ -36,12 +36,12 @@ describe('BlockSynchronizer', () => { tipsStore = new L2TipsKVStore(store, 'pxe'); anchorBlockDataProvider = new AnchorBlockDataProvider(store); noteDataProvider = await NoteDataProvider.create(store); - taggingDataProvider = new TaggingDataProvider(store); + recipientTaggingDataProvider = new RecipientTaggingDataProvider(store); synchronizer = new TestSynchronizer( aztecNode, anchorBlockDataProvider, noteDataProvider, - taggingDataProvider, + recipientTaggingDataProvider, tipsStore, ); }); @@ -59,7 +59,7 @@ describe('BlockSynchronizer', () => { .spyOn(noteDataProvider, 'rollbackNotesAndNullifiers') .mockImplementation(() => Promise.resolve()); const resetNoteSyncData = jest - .spyOn(taggingDataProvider, 'resetNoteSyncData') + .spyOn(recipientTaggingDataProvider, 'resetNoteSyncData') .mockImplementation(() => Promise.resolve()); aztecNode.getBlockHeader.mockImplementation(async blockNumber => (await L2Block.random(BlockNumber(blockNumber as number))).getBlockHeader(), diff --git a/yarn-project/pxe/src/block_synchronizer/block_synchronizer.ts b/yarn-project/pxe/src/block_synchronizer/block_synchronizer.ts index 59e0e66344b6..3649511b59b0 100644 --- a/yarn-project/pxe/src/block_synchronizer/block_synchronizer.ts +++ b/yarn-project/pxe/src/block_synchronizer/block_synchronizer.ts @@ -7,7 +7,7 @@ import type { AztecNode } from '@aztec/stdlib/interfaces/client'; import type { PXEConfig } from '../config/index.js'; import type { AnchorBlockDataProvider } from '../storage/anchor_block_data_provider/anchor_block_data_provider.js'; import type { NoteDataProvider } from '../storage/note_data_provider/note_data_provider.js'; -import type { TaggingDataProvider } from '../storage/tagging_data_provider/tagging_data_provider.js'; +import type { 
RecipientTaggingDataProvider } from '../storage/tagging_data_provider/recipient_tagging_data_provider.js'; /** * The BlockSynchronizer class orchestrates synchronization between PXE and Aztec node, maintaining an up-to-date @@ -23,7 +23,7 @@ export class BlockSynchronizer implements L2BlockStreamEventHandler { private node: AztecNode, private anchorBlockDataProvider: AnchorBlockDataProvider, private noteDataProvider: NoteDataProvider, - private taggingDataProvider: TaggingDataProvider, + private recipientTaggingDataProvider: RecipientTaggingDataProvider, private l2TipsStore: L2TipsKVStore, config: Partial> = {}, loggerOrSuffix?: string | Logger, @@ -66,7 +66,10 @@ export class BlockSynchronizer implements L2BlockStreamEventHandler { await this.noteDataProvider.rollbackNotesAndNullifiers(event.block.number, lastSynchedBlockNumber); // Remove all note tagging indexes to force a full resync. This is suboptimal, but unless we track the // block number in which each index is used it's all we can do. - await this.taggingDataProvider.resetNoteSyncData(); + // Note: This is now unnecessary for the sender tagging data provider because the new algorithm handles reorgs. + // TODO(#17775): Once this issue is implemented we will have the index-block number mapping, so we can + // implement this more intelligently. + await this.recipientTaggingDataProvider.resetNoteSyncData(); // Update the header to the last block. 
const newHeader = await this.node.getBlockHeader(event.block.number); if (!newHeader) { diff --git a/yarn-project/pxe/src/contract_function_simulator/execution_data_provider.ts b/yarn-project/pxe/src/contract_function_simulator/execution_data_provider.ts index 072c975a8160..3b62b4602c86 100644 --- a/yarn-project/pxe/src/contract_function_simulator/execution_data_provider.ts +++ b/yarn-project/pxe/src/contract_function_simulator/execution_data_provider.ts @@ -6,12 +6,14 @@ import type { FunctionArtifactWithContractName, FunctionSelector } from '@aztec/ import type { AztecAddress } from '@aztec/stdlib/aztec-address'; import type { L2Block } from '@aztec/stdlib/block'; import type { CompleteAddress, ContractInstance } from '@aztec/stdlib/contract'; +import type { AztecNode } from '@aztec/stdlib/interfaces/client'; import type { KeyValidationRequest } from '@aztec/stdlib/kernel'; import type { DirectionalAppTaggingSecret } from '@aztec/stdlib/logs'; import type { NoteStatus } from '@aztec/stdlib/note'; import { type MerkleTreeId, type NullifierMembershipWitness, PublicDataWitness } from '@aztec/stdlib/trees'; import type { NodeStats } from '@aztec/stdlib/tx'; +import type { SenderTaggingDataProvider } from '../storage/tagging_data_provider/sender_tagging_data_provider.js'; import type { NoteData } from './oracle/interfaces.js'; import type { MessageLoadOracleInputs } from './oracle/message_load_oracle_inputs.js'; @@ -219,25 +221,6 @@ export interface ExecutionDataProvider { recipient: AztecAddress, ): Promise; - /** - * Updates the local index of the shared tagging secret of a (sender, recipient, contract) tuple if a log with - * a larger index is found from the node. - * @param secret - The secret that's unique for (sender, recipient, contract) tuple while the direction - * of sender -> recipient matters. - * @param contractAddress - The address of the contract that the logs are tagged for. 
Needs to be provided to store - * because the function performs second round of siloing which is necessary because kernels do it as well (they silo - * first field of the private log which corresponds to the tag). - */ - syncTaggedLogsAsSender(secret: DirectionalAppTaggingSecret, contractAddress: AztecAddress): Promise; - - /** - * Returns the last used index when sending a log with a given secret. - * @param secret - The directional app tagging secret. - * @returns The last used index for the given directional app tagging secret, or undefined if we never sent a log - * from this sender to a recipient in a given contract (implicitly included in the secret). - */ - getLastUsedIndexAsSender(secret: DirectionalAppTaggingSecret): Promise; - /** * Synchronizes the private logs tagged with scoped addresses and all the senders in the address book. Stores the found * logs in CapsuleArray ready for a later retrieval in Aztec.nr. @@ -332,4 +315,8 @@ export interface ExecutionDataProvider { * @returns The execution statistics. */ getStats(): ExecutionStats; + + // Exposed when moving in the direction of #17776 + get aztecNode(): AztecNode; + get senderTaggingDataProvider(): SenderTaggingDataProvider; } diff --git a/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts b/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts index 7d32dc1fe1a3..85a45f7fb108 100644 --- a/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts +++ b/yarn-project/pxe/src/contract_function_simulator/execution_tagging_index_cache.ts @@ -21,7 +21,7 @@ export class ExecutionTaggingIndexCache { } /** - * Returns the pre tags that were used in this execution (and that need to be stored in the db). + * Returns the pre-tags that were used in this execution (and that need to be stored in the db). 
*/ public getUsedPreTags(): PreTag[] { return Array.from(this.taggingIndexMap.entries()).map(([secret, index]) => ({ diff --git a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.test.ts b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.test.ts index 20ec21edbd2e..1106d8fde0c4 100644 --- a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.test.ts +++ b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution.test.ts @@ -40,6 +40,7 @@ import { } from '@aztec/stdlib/contract'; import { GasFees, GasSettings } from '@aztec/stdlib/gas'; import { computeNoteHashNonce, computeSecretHash, computeUniqueNoteHash, siloNoteHash } from '@aztec/stdlib/hash'; +import type { AztecNode } from '@aztec/stdlib/interfaces/client'; import { KeyValidationRequest } from '@aztec/stdlib/kernel'; import { computeAppNullifierSecretKey, deriveKeys } from '@aztec/stdlib/keys'; import { DirectionalAppTaggingSecret } from '@aztec/stdlib/logs'; @@ -60,6 +61,7 @@ import { jest } from '@jest/globals'; import { Matcher, type MatcherCreator, type MockProxy, mock } from 'jest-mock-extended'; import { toFunctionSelector } from 'viem'; +import type { SenderTaggingDataProvider } from '../../storage/tagging_data_provider/sender_tagging_data_provider.js'; import { ContractFunctionSimulator } from '../contract_function_simulator.js'; import type { ExecutionDataProvider } from '../execution_data_provider.js'; import type { NoteData } from './interfaces.js'; @@ -102,6 +104,8 @@ describe('Private Execution test suite', () => { const simulator = new WASMSimulator(); let executionDataProvider: MockProxy; + let senderTaggingDataProvider: MockProxy; + let aztecNode: MockProxy; let acirSimulator: ContractFunctionSimulator; let anchorBlockHeader = BlockHeader.empty(); @@ -266,7 +270,30 @@ describe('Private Execution test suite', () => { beforeEach(async () => { trees = {}; executionDataProvider = mock(); + 
senderTaggingDataProvider = mock(); + aztecNode = mock(); contracts = {}; + + // Mock the senderTaggingDataProvider getter + Object.defineProperty(executionDataProvider, 'senderTaggingDataProvider', { + get: () => senderTaggingDataProvider, + }); + + // Mock the aztecNode getter + Object.defineProperty(executionDataProvider, 'aztecNode', { + get: () => aztecNode, + }); + + // Mock sender tagging data provider methods + senderTaggingDataProvider.getLastFinalizedIndex.mockResolvedValue(undefined); + senderTaggingDataProvider.getLastUsedIndex.mockResolvedValue(undefined); + senderTaggingDataProvider.getTxHashesOfPendingIndexes.mockResolvedValue([]); + senderTaggingDataProvider.storePendingIndexes.mockResolvedValue(); + + // Mock aztec node methods - the return array needs to have the same length as the number of tags + // on the input. + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => Promise.resolve(tags.map(() => []))); + executionDataProvider.getKeyValidationRequest.mockImplementation( async (pkMHash: Fr, contractAddress: AztecAddress) => { if (pkMHash.equals(await ownerCompleteAddress.publicKeys.masterNullifierPublicKey.hash())) { @@ -303,9 +330,6 @@ describe('Private Execution test suite', () => { throw new Error(`Unknown address: ${address}. 
Recipient: ${recipient}, Owner: ${owner}`); }); - executionDataProvider.getLastUsedIndexAsSender.mockImplementation((_secret: DirectionalAppTaggingSecret) => { - return Promise.resolve(undefined); - }); executionDataProvider.getFunctionArtifact.mockImplementation(async (address, selector) => { const contract = contracts[address.toString()]; if (!contract) { @@ -323,9 +347,6 @@ describe('Private Execution test suite', () => { executionDataProvider.calculateDirectionalAppTaggingSecret.mockImplementation((_contract, _sender, _recipient) => { return Promise.resolve(DirectionalAppTaggingSecret.fromString('0x1')); }); - executionDataProvider.syncTaggedLogsAsSender.mockImplementation((_directionalAppTaggingSecret, _contractAddress) => - Promise.resolve(), - ); executionDataProvider.loadCapsule.mockImplementation((_, __) => Promise.resolve(null)); executionDataProvider.getPublicStorageAt.mockImplementation( diff --git a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts index 04f34fa7b2bb..4e57a1d42cb4 100644 --- a/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts +++ b/yarn-project/pxe/src/contract_function_simulator/oracle/private_execution_oracle.ts @@ -26,6 +26,7 @@ import { type TxContext, } from '@aztec/stdlib/tx'; +import { syncSenderTaggingIndexes } from '../../tagging/sync/sync_sender_tagging_indexes.js'; import { Tag } from '../../tagging/tag.js'; import type { ExecutionDataProvider } from '../execution_data_provider.js'; import type { ExecutionNoteCache } from '../execution_note_cache.js'; @@ -152,7 +153,7 @@ export class PrivateExecutionOracle extends UtilityExecutionOracle implements IP } /** - * Returns the pre tags that were used in this execution (and that need to be stored in the db). + * Returns the pre-tags that were used in this execution (and that need to be stored in the db). 
*/ public getUsedPreTags(): PreTag[] { return this.taggingIndexCache.getUsedPreTags(); @@ -224,11 +225,16 @@ export class PrivateExecutionOracle extends UtilityExecutionOracle implements IP if (lastUsedIndexInTx !== undefined) { return lastUsedIndexInTx + 1; } else { + // TODO(#17776): Don't access the Aztec node and senderTaggingDataProvider via the executionDataProvider. + const aztecNode = this.executionDataProvider.aztecNode; + const senderTaggingDataProvider = this.executionDataProvider.senderTaggingDataProvider; + // This is a tagging secret we've not yet used in this tx, so first sync our store to make sure its indices // are up to date. We do this here because this store is not synced as part of the global sync because // that'd be wasteful as most tagging secrets are not used in each tx. - await this.executionDataProvider.syncTaggedLogsAsSender(secret, this.contractAddress); - const lastUsedIndex = await this.executionDataProvider.getLastUsedIndexAsSender(secret); + await syncSenderTaggingIndexes(secret, this.contractAddress, aztecNode, senderTaggingDataProvider); + + const lastUsedIndex = await senderTaggingDataProvider.getLastUsedIndex(secret); // If lastUsedIndex is undefined, we've never used this secret, so start from 0 // Otherwise, the next index to use is one past the last used index return lastUsedIndex === undefined ? 
0 : lastUsedIndex + 1; diff --git a/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.test.ts b/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.test.ts index a7721f5f480c..3daccb6823b0 100644 --- a/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.test.ts +++ b/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.test.ts @@ -32,7 +32,8 @@ import { CapsuleDataProvider } from '../storage/capsule_data_provider/capsule_da import { ContractDataProvider } from '../storage/contract_data_provider/contract_data_provider.js'; import { NoteDataProvider } from '../storage/note_data_provider/note_data_provider.js'; import { PrivateEventDataProvider } from '../storage/private_event_data_provider/private_event_data_provider.js'; -import { TaggingDataProvider } from '../storage/tagging_data_provider/tagging_data_provider.js'; +import { RecipientTaggingDataProvider } from '../storage/tagging_data_provider/recipient_tagging_data_provider.js'; +import { SenderTaggingDataProvider } from '../storage/tagging_data_provider/sender_tagging_data_provider.js'; import { WINDOW_HALF_SIZE } from '../tagging/constants.js'; import { SiloedTag } from '../tagging/siloed_tag.js'; import { Tag } from '../tagging/tag.js'; @@ -65,8 +66,9 @@ describe('PXEOracleInterface', () => { let privateEventDataProvider: PrivateEventDataProvider; let contractDataProvider: ContractDataProvider; let noteDataProvider: NoteDataProvider; + let senderTaggingDataProvider: SenderTaggingDataProvider; + let recipientTaggingDataProvider: RecipientTaggingDataProvider; let anchorBlockDataProvider: AnchorBlockDataProvider; - let taggingDataProvider: TaggingDataProvider; let capsuleDataProvider: CapsuleDataProvider; let keyStore: KeyStore; @@ -90,7 +92,8 @@ describe('PXEOracleInterface', () => { privateEventDataProvider = new PrivateEventDataProvider(store); noteDataProvider = await NoteDataProvider.create(store); anchorBlockDataProvider = new 
AnchorBlockDataProvider(store); - taggingDataProvider = new TaggingDataProvider(store); + senderTaggingDataProvider = new SenderTaggingDataProvider(store); + recipientTaggingDataProvider = new RecipientTaggingDataProvider(store); capsuleDataProvider = new CapsuleDataProvider(store); keyStore = new KeyStore(store); pxeOracleInterface = new PXEOracleInterface( @@ -100,7 +103,8 @@ describe('PXEOracleInterface', () => { noteDataProvider, capsuleDataProvider, anchorBlockDataProvider, - taggingDataProvider, + senderTaggingDataProvider, + recipientTaggingDataProvider, addressDataProvider, privateEventDataProvider, ); // Set up contract address @@ -209,7 +213,7 @@ describe('PXEOracleInterface', () => { return { completeAddress, ivsk: keys.masterIncomingViewingSecretKey, secretKey: new Fr(index) }; }); for (const sender of senders) { - await taggingDataProvider.addSenderAddress(sender.completeAddress.address); + await recipientTaggingDataProvider.addSenderAddress(sender.completeAddress.address); } aztecNode.getLogsByTags.mockReset(); aztecNode.getTxEffect.mockResolvedValue({ @@ -245,7 +249,7 @@ describe('PXEOracleInterface', () => { // First sender should have 2 logs, but keep index 0 since they were built using the same tag // Next 4 senders should also have index 0 = offset + 0 // Last 5 senders should have index 1 = offset + 1 - const indexes = await taggingDataProvider.getLastUsedIndexesAsRecipient(secrets); + const indexes = await recipientTaggingDataProvider.getLastUsedIndexes(secrets); expect(indexes).toHaveLength(NUM_SENDERS); expect(indexes).toEqual([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]); @@ -255,88 +259,6 @@ describe('PXEOracleInterface', () => { expect(aztecNode.getLogsByTags.mock.calls.length).toBe(2); }); - it('should sync tagged logs as senders', async () => { - for (const sender of senders) { - await addressDataProvider.addCompleteAddress(sender.completeAddress); - await keyStore.addAccount(sender.secretKey, sender.completeAddress.partialAddress); - } - - let 
tagIndex = 0; - await generateMockLogs(tagIndex); - - // Recompute the secrets (as recipient) to ensure indexes are updated - const ivsk = await keyStore.getMasterIncomingViewingSecretKey(recipient.address); - // An array of directional secrets for each sender-recipient pair - const secrets = await Promise.all( - senders.map(sender => - DirectionalAppTaggingSecret.compute( - recipient, - ivsk, - sender.completeAddress.address, - contractAddress, - recipient.address, - ), - ), - ); - - const getTaggingSecretsIndexesAsSenderForSenders = () => - Promise.all(secrets.map(secret => taggingDataProvider.getLastUsedIndexesAsSender(secret))); - - const indexesAsSender = await getTaggingSecretsIndexesAsSenderForSenders(); - expect(indexesAsSender).toStrictEqual([ - undefined, - undefined, - undefined, - undefined, - undefined, - undefined, - undefined, - undefined, - undefined, - undefined, - ]); - - expect(aztecNode.getLogsByTags.mock.calls.length).toBe(0); - - for (let i = 0; i < senders.length; i++) { - const directionalAppTaggingSecret = await DirectionalAppTaggingSecret.compute( - senders[i].completeAddress, - senders[i].ivsk, - recipient.address, - contractAddress, - recipient.address, - ); - await pxeOracleInterface.syncTaggedLogsAsSender(directionalAppTaggingSecret, contractAddress); - } - - let indexesAsSenderAfterSync = await getTaggingSecretsIndexesAsSenderForSenders(); - expect(indexesAsSenderAfterSync).toStrictEqual([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]); - - // Only 1 window is obtained for each sender - expect(aztecNode.getLogsByTags.mock.calls.length).toBe(NUM_SENDERS); - aztecNode.getLogsByTags.mockReset(); - - // We add more logs to the second half of the window to test that a second iteration in `syncTaggedLogsAsSender` - // is handled correctly. 
- tagIndex = 11; - await generateMockLogs(tagIndex); - for (let i = 0; i < senders.length; i++) { - const directionalAppTaggingSecret = await DirectionalAppTaggingSecret.compute( - senders[i].completeAddress, - senders[i].ivsk, - recipient.address, - contractAddress, - recipient.address, - ); - await pxeOracleInterface.syncTaggedLogsAsSender(directionalAppTaggingSecret, contractAddress); - } - - indexesAsSenderAfterSync = await getTaggingSecretsIndexesAsSenderForSenders(); - expect(indexesAsSenderAfterSync).toStrictEqual([10, 10, 10, 10, 10, 11, 11, 11, 11, 11]); - - expect(aztecNode.getLogsByTags.mock.calls.length).toBe(NUM_SENDERS * 2); - }); - it('should sync tagged logs with a sender index offset', async () => { const tagIndex = 5; await generateMockLogs(tagIndex); @@ -363,7 +285,7 @@ describe('PXEOracleInterface', () => { // First sender should have 2 logs, but keep index 5 since they were built using the same tag // Next 4 senders should also have index 5 = offset // Last 5 senders should have index 6 = offset + 1 - const indexes = await taggingDataProvider.getLastUsedIndexesAsRecipient(secrets); + const indexes = await recipientTaggingDataProvider.getLastUsedIndexes(secrets); expect(indexes).toHaveLength(NUM_SENDERS); expect(indexes).toEqual([5, 5, 5, 5, 5, 6, 6, 6, 6, 6]); @@ -392,7 +314,7 @@ describe('PXEOracleInterface', () => { ); // Set last used indexes to 1 (so next scan starts at 2) - await taggingDataProvider.setLastUsedIndexesAsRecipient(secrets.map(secret => ({ secret, index: 1 }))); + await recipientTaggingDataProvider.setLastUsedIndexes(secrets.map(secret => ({ secret, index: 1 }))); await pxeOracleInterface.syncTaggedLogs(contractAddress, PENDING_TAGGED_LOG_ARRAY_BASE_SLOT); @@ -403,7 +325,7 @@ describe('PXEOracleInterface', () => { // First sender should have 2 logs, but keep index 1 since they were built using the same tag // Next 4 senders should also have index 1 = tagIndex // Last 5 senders should have index 2 = tagIndex + 1 - const 
indexes = await taggingDataProvider.getLastUsedIndexesAsRecipient(secrets); + const indexes = await recipientTaggingDataProvider.getLastUsedIndexes(secrets); expect(indexes).toHaveLength(NUM_SENDERS); expect(indexes).toEqual([1, 1, 1, 1, 1, 2, 2, 2, 2, 2]); @@ -434,7 +356,7 @@ describe('PXEOracleInterface', () => { // We set the last used indexes to WINDOW_HALF_SIZE so that next scan starts at WINDOW_HALF_SIZE + 1, // which is outside the window, and for this reason no updates should be triggered. const index = WINDOW_HALF_SIZE + 1; - await taggingDataProvider.setLastUsedIndexesAsRecipient(secrets.map(secret => ({ secret, index }))); + await recipientTaggingDataProvider.setLastUsedIndexes(secrets.map(secret => ({ secret, index }))); await pxeOracleInterface.syncTaggedLogs(contractAddress, PENDING_TAGGED_LOG_ARRAY_BASE_SLOT); @@ -443,7 +365,7 @@ describe('PXEOracleInterface', () => { await expectPendingTaggedLogArrayLengthToBe(contractAddress, NUM_SENDERS / 2); // Indexes should remain where we set them (window_size) - const indexes = await taggingDataProvider.getLastUsedIndexesAsRecipient(secrets); + const indexes = await recipientTaggingDataProvider.getLastUsedIndexes(secrets); expect(indexes).toHaveLength(NUM_SENDERS); expect(indexes).toEqual([index, index, index, index, index, index, index, index, index, index]); @@ -470,7 +392,7 @@ describe('PXEOracleInterface', () => { ), ); - await taggingDataProvider.setLastUsedIndexesAsRecipient( + await recipientTaggingDataProvider.setLastUsedIndexes( secrets.map(secret => ({ secret, index: WINDOW_HALF_SIZE + 2 })), ); @@ -486,14 +408,14 @@ describe('PXEOracleInterface', () => { aztecNode.getLogsByTags.mockClear(); // Wipe the database - await taggingDataProvider.resetNoteSyncData(); + await recipientTaggingDataProvider.resetNoteSyncData(); await pxeOracleInterface.syncTaggedLogs(contractAddress, PENDING_TAGGED_LOG_ARRAY_BASE_SLOT); // First sender should have 2 logs, but keep index 0 since they were built using the same 
tag // Next 4 senders should also have index 0 = offset // Last 5 senders should have index 1 = offset + 1 - const indexes = await taggingDataProvider.getLastUsedIndexesAsRecipient(secrets); + const indexes = await recipientTaggingDataProvider.getLastUsedIndexes(secrets); expect(indexes).toHaveLength(NUM_SENDERS); expect(indexes).toEqual([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]); diff --git a/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.ts b/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.ts index 1814a459666c..d35d6c917c43 100644 --- a/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.ts +++ b/yarn-project/pxe/src/contract_function_simulator/pxe_oracle_interface.ts @@ -1,5 +1,4 @@ import type { L1_TO_L2_MSG_TREE_HEIGHT } from '@aztec/constants'; -import { timesParallel } from '@aztec/foundation/collection'; import { Fr } from '@aztec/foundation/curves/bn254'; import { Point } from '@aztec/foundation/curves/grumpkin'; import { createLogger } from '@aztec/foundation/log'; @@ -35,7 +34,8 @@ import type { CapsuleDataProvider } from '../storage/capsule_data_provider/capsu import type { ContractDataProvider } from '../storage/contract_data_provider/contract_data_provider.js'; import type { NoteDataProvider } from '../storage/note_data_provider/note_data_provider.js'; import type { PrivateEventDataProvider } from '../storage/private_event_data_provider/private_event_data_provider.js'; -import type { TaggingDataProvider } from '../storage/tagging_data_provider/tagging_data_provider.js'; +import type { RecipientTaggingDataProvider } from '../storage/tagging_data_provider/recipient_tagging_data_provider.js'; +import type { SenderTaggingDataProvider } from '../storage/tagging_data_provider/sender_tagging_data_provider.js'; import { DirectionalAppTaggingSecret, SiloedTag, @@ -54,14 +54,19 @@ import type { ProxiedNode } from './proxied_node.js'; * A data layer that provides and stores information needed for 
simulating/proving a transaction. */ export class PXEOracleInterface implements ExecutionDataProvider { + // Note: The Aztec node and senderDataProvider are exposed publicly since PXEOracleInterface will be deprecated soon + // (issue #17776). When refactoring tagging, it made sense to align with this future change by moving the sender + // tagging index sync functionality elsewhere. This required exposing these two properties since there is currently + // no alternative way to access them in the PrivateExecutionOracle. constructor( - private aztecNode: AztecNode | ProxiedNode, + public readonly aztecNode: AztecNode | ProxiedNode, private keyStore: KeyStore, private contractDataProvider: ContractDataProvider, private noteDataProvider: NoteDataProvider, private capsuleDataProvider: CapsuleDataProvider, private anchorBlockDataProvider: AnchorBlockDataProvider, - private taggingDataProvider: TaggingDataProvider, + public readonly senderTaggingDataProvider: SenderTaggingDataProvider, + private recipientTaggingDataProvider: RecipientTaggingDataProvider, private addressDataProvider: AddressDataProvider, private privateEventDataProvider: PrivateEventDataProvider, private log = createLogger('pxe:pxe_oracle_interface'), @@ -259,11 +264,7 @@ export class PXEOracleInterface implements ExecutionDataProvider { * @returns The full list of the users contact addresses. */ public getSenders(): Promise { - return this.taggingDataProvider.getSenderAddresses(); - } - - public getLastUsedIndexAsSender(secret: DirectionalAppTaggingSecret): Promise { - return this.taggingDataProvider.getLastUsedIndexesAsSender(secret); + return this.recipientTaggingDataProvider.getSenderAddresses(); } public async calculateDirectionalAppTaggingSecret( @@ -292,8 +293,8 @@ export class PXEOracleInterface implements ExecutionDataProvider { * @param recipient - The address receiving the notes * @returns A list of directional app tagging secrets along with the last used tagging indexes. 
If the corresponding * secret was never used, the index is undefined. - * TODO(benesjan): The naming here is broken as the function name does not reflect the return type. Fix when associating - * indexes with tx hash. + * TODO(#17775): The naming here is broken as the function name does not reflect the return type. Make sure this gets + * fixed when implementing the linked issue. */ async #getLastUsedTaggingIndexesForSenders( contractAddress: AztecAddress, @@ -305,7 +306,7 @@ export class PXEOracleInterface implements ExecutionDataProvider { // We implicitly add all PXE accounts as senders, this helps us decrypt tags on notes that we send to ourselves // (recipient = us, sender = us) const senders = [ - ...(await this.taggingDataProvider.getSenderAddresses()), + ...(await this.recipientTaggingDataProvider.getSenderAddresses()), ...(await this.keyStore.getAccounts()), ].filter((address, index, self) => index === self.findIndex(otherAddress => otherAddress.equals(address))); const secrets = await Promise.all( @@ -319,7 +320,7 @@ export class PXEOracleInterface implements ExecutionDataProvider { ); }), ); - const indexes = await this.taggingDataProvider.getLastUsedIndexesAsRecipient(secrets); + const indexes = await this.recipientTaggingDataProvider.getLastUsedIndexes(secrets); if (indexes.length !== secrets.length) { throw new Error('Indexes and directional app tagging secrets have different lengths'); } @@ -330,80 +331,9 @@ export class PXEOracleInterface implements ExecutionDataProvider { })); } - public async syncTaggedLogsAsSender( - secret: DirectionalAppTaggingSecret, - contractAddress: AztecAddress, - ): Promise { - const lastUsedIndex = await this.taggingDataProvider.getLastUsedIndexesAsSender(secret); - // If lastUsedIndex is undefined, we've never used this secret, so start from 0 - // Otherwise, start from one past the last used index - const startIndex = lastUsedIndex === undefined ? 0 : lastUsedIndex + 1; - - // This algorithm works such that: - // 1. 
If we find minimum consecutive empty logs in a window of logs we set the index to the index of the last log - // we found and quit. - // 2. If we don't find minimum consecutive empty logs in a window of logs we slide the window to latest log index - // and repeat the process. - const MIN_CONSECUTIVE_EMPTY_LOGS = 10; - const WINDOW_SIZE = MIN_CONSECUTIVE_EMPTY_LOGS * 2; - - let [numConsecutiveEmptyLogs, currentIndex] = [0, startIndex]; - let lastFoundLogIndex: number | undefined = undefined; - do { - // We compute the tags for the current window of indexes - const currentTags = await timesParallel(WINDOW_SIZE, async i => { - return SiloedTag.compute(await Tag.compute({ secret, index: currentIndex + i }), contractAddress); - }); - - // We fetch the logs for the tags - // TODO: The following conversion is unfortunate and we should most likely just type the #getPrivateLogsByTags - // to accept SiloedTag[] instead of Fr[]. That would result in a large change so I didn't do it yet. - const tagsAsFr = currentTags.map(tag => tag.value); - const possibleLogs = await this.#getPrivateLogsByTags(tagsAsFr); - - // We find the index of the last log in the window that is not empty - const indexOfLastLogWithinArray = possibleLogs.findLastIndex(possibleLog => possibleLog.length !== 0); - - if (indexOfLastLogWithinArray === -1) { - // We haven't found any logs in the current window so we stop looking - break; - } - - // We've found logs so we update the last found log index - lastFoundLogIndex = (lastFoundLogIndex ?? 0) + indexOfLastLogWithinArray; - // We move the current index to that of the log right after the last found log - currentIndex = lastFoundLogIndex + 1; - - // We compute the number of consecutive empty logs we found and repeat the process if we haven't found enough. 
- numConsecutiveEmptyLogs = WINDOW_SIZE - indexOfLastLogWithinArray - 1; - } while (numConsecutiveEmptyLogs < MIN_CONSECUTIVE_EMPTY_LOGS); - - const contractName = await this.contractDataProvider.getDebugContractName(contractAddress); - if (lastFoundLogIndex !== undefined) { - // Last found index is defined meaning we have actually found logs so we update the last used index - await this.taggingDataProvider.setLastUsedIndexesAsSender([{ secret, index: lastFoundLogIndex }]); - - this.log.debug(`Syncing logs for secret ${secret.toString()} at contract ${contractName}(${contractAddress})`, { - index: currentIndex, - contractName, - contractAddress, - }); - } else { - this.log.debug( - `No new logs found for secret ${secret.toString()} at contract ${contractName}(${contractAddress})`, - ); - } - } - - /** - * Synchronizes the private logs tagged with scoped addresses and all the senders in the address book. Stores the found - * logs in CapsuleArray ready for a later retrieval in Aztec.nr. - * @param contractAddress - The address of the contract that the logs are tagged for. - * @param pendingTaggedLogArrayBaseSlot - The base slot of the pending tagged logs capsule array in which - * found logs will be stored. - * @param scopes - The scoped addresses to sync logs for. If not provided, all accounts in the address book will be - * synced. - */ + // TODO(#17775): Replace this implementation of this function with one implementing an approach similar + // to syncSenderTaggingIndexes. Not done yet due to re-prioritization to devex and this doesn't directly affect + // devex. 
public async syncTaggedLogs( contractAddress: AztecAddress, pendingTaggedLogArrayBaseSlot: Fr, @@ -493,7 +423,7 @@ export class PXEOracleInterface implements ExecutionDataProvider { filteredLogsByBlockNumber, ); - // We retrieve the pre tag corresponding to the log as I need that to evaluate whether + // We retrieve the pre-tag corresponding to the log as I need that to evaluate whether // a new largest index have been found. const preTagCorrespondingToLog = preTagsForTheWholeWindow[logIndex]; const initialIndex = initialIndexesMap[preTagCorrespondingToLog.secret.toString()]; @@ -550,7 +480,7 @@ export class PXEOracleInterface implements ExecutionDataProvider { // At this point we have processed all the logs for the recipient so we store the last used indexes in the db. // newLargestIndexMapToStore contains "next" indexes to look for (one past the last found), so subtract 1 to get // last used. - await this.taggingDataProvider.setLastUsedIndexesAsRecipient( + await this.recipientTaggingDataProvider.setLastUsedIndexes( Object.entries(newLargestIndexMapToStore).map(([directionalAppTaggingSecret, index]) => ({ secret: DirectionalAppTaggingSecret.fromString(directionalAppTaggingSecret), index: index - 1, diff --git a/yarn-project/pxe/src/pxe.test.ts b/yarn-project/pxe/src/pxe.test.ts index e92fab4d8b12..ff2261c085fd 100644 --- a/yarn-project/pxe/src/pxe.test.ts +++ b/yarn-project/pxe/src/pxe.test.ts @@ -178,8 +178,9 @@ describe('PXE', () => { // class id of a contract instance node.getPublicStorageAt.mockResolvedValue(Fr.ZERO); - // Used to sync private logs from the node. - node.getLogsByTags.mockResolvedValue([]); + // Used to sync private logs from the node - the return array needs to have the same length as the number of tags + // on the input. 
+ node.getLogsByTags.mockImplementation((tags: Fr[]) => Promise.resolve(tags.map(() => []))); // Necessary to sync contract private state await pxe.registerContractClass(TestContractArtifact); diff --git a/yarn-project/pxe/src/pxe.ts b/yarn-project/pxe/src/pxe.ts index 841d30b92f0b..b1d4c6ba46a1 100644 --- a/yarn-project/pxe/src/pxe.ts +++ b/yarn-project/pxe/src/pxe.ts @@ -76,7 +76,8 @@ import { CapsuleDataProvider } from './storage/capsule_data_provider/capsule_dat import { ContractDataProvider } from './storage/contract_data_provider/contract_data_provider.js'; import { NoteDataProvider } from './storage/note_data_provider/note_data_provider.js'; import { PrivateEventDataProvider } from './storage/private_event_data_provider/private_event_data_provider.js'; -import { TaggingDataProvider } from './storage/tagging_data_provider/tagging_data_provider.js'; +import { RecipientTaggingDataProvider } from './storage/tagging_data_provider/recipient_tagging_data_provider.js'; +import { SenderTaggingDataProvider } from './storage/tagging_data_provider/sender_tagging_data_provider.js'; export type PackedPrivateEvent = InTx & { packedEvent: Fr[]; @@ -96,7 +97,8 @@ export class PXE { private noteDataProvider: NoteDataProvider, private capsuleDataProvider: CapsuleDataProvider, private anchorBlockDataProvider: AnchorBlockDataProvider, - private taggingDataProvider: TaggingDataProvider, + private senderTaggingDataProvider: SenderTaggingDataProvider, + private recipientTaggingDataProvider: RecipientTaggingDataProvider, private addressDataProvider: AddressDataProvider, private privateEventDataProvider: PrivateEventDataProvider, private simulator: CircuitSimulator, @@ -135,7 +137,8 @@ export class PXE { const contractDataProvider = new ContractDataProvider(store); const noteDataProvider = await NoteDataProvider.create(store); const anchorBlockDataProvider = new AnchorBlockDataProvider(store); - const taggingDataProvider = new TaggingDataProvider(store); + const 
senderTaggingDataProvider = new SenderTaggingDataProvider(store); + const recipientTaggingDataProvider = new RecipientTaggingDataProvider(store); const capsuleDataProvider = new CapsuleDataProvider(store); const keyStore = new KeyStore(store); const tipsStore = new L2TipsKVStore(store, 'pxe'); @@ -143,7 +146,7 @@ export class PXE { node, anchorBlockDataProvider, noteDataProvider, - taggingDataProvider, + recipientTaggingDataProvider, tipsStore, config, loggerOrSuffix, @@ -161,7 +164,8 @@ export class PXE { noteDataProvider, capsuleDataProvider, anchorBlockDataProvider, - taggingDataProvider, + senderTaggingDataProvider, + recipientTaggingDataProvider, addressDataProvider, privateEventDataProvider, simulator, @@ -193,7 +197,8 @@ export class PXE { this.noteDataProvider, this.capsuleDataProvider, this.anchorBlockDataProvider, - this.taggingDataProvider, + this.senderTaggingDataProvider, + this.recipientTaggingDataProvider, this.addressDataProvider, this.privateEventDataProvider, this.log, @@ -468,50 +473,52 @@ export class PXE { } /** - * Registers a user contact in PXE. + * Registers a sender in this PXE. * - * Once a new contact is registered, the PXE will be able to receive notes tagged from this contact. - * Will do nothing if the account is already registered. + * After registering a new sender, the PXE will sync private logs that are tagged with this sender's address. + * Will do nothing if the address is already registered. * - * @param address - Address of the user to add to the address book - * @returns The address address of the account. + * @param sender - Address of the sender to register. + * @returns The address of the sender. + * TODO: It's strange that we return the address here and I (benesjan) think we should drop the return value. 
*/ - public async registerSender(address: AztecAddress): Promise { + public async registerSender(sender: AztecAddress): Promise { const accounts = await this.keyStore.getAccounts(); - if (accounts.includes(address)) { - this.log.info(`Sender:\n "${address.toString()}"\n already registered.`); - return address; + if (accounts.includes(sender)) { + this.log.info(`Sender:\n "${sender.toString()}"\n already registered.`); + return sender; } - const wasAdded = await this.taggingDataProvider.addSenderAddress(address); + const wasAdded = await this.recipientTaggingDataProvider.addSenderAddress(sender); if (wasAdded) { - this.log.info(`Added sender:\n ${address.toString()}`); + this.log.info(`Added sender:\n ${sender.toString()}`); } else { - this.log.info(`Sender:\n "${address.toString()}"\n already registered.`); + this.log.info(`Sender:\n "${sender.toString()}"\n already registered.`); } - return address; + return sender; } /** - * Retrieves the addresses stored as senders on this PXE. - * @returns An array of the senders on this PXE. + * Retrieves senders registered in this PXE. + * @returns Senders registered in this PXE. */ public getSenders(): Promise { - return this.taggingDataProvider.getSenderAddresses(); + return this.recipientTaggingDataProvider.getSenderAddresses(); } /** - * Removes a sender in the address book. + * Removes a sender registered in this PXE. + * @param sender - The address of the sender to remove. 
*/ - public async removeSender(address: AztecAddress): Promise { - const wasRemoved = await this.taggingDataProvider.removeSenderAddress(address); + public async removeSender(sender: AztecAddress): Promise { + const wasRemoved = await this.recipientTaggingDataProvider.removeSenderAddress(sender); if (wasRemoved) { - this.log.info(`Removed sender:\n ${address.toString()}`); + this.log.info(`Removed sender:\n ${sender.toString()}`); } else { - this.log.info(`Sender:\n "${address.toString()}"\n not in address book.`); + this.log.info(`Sender:\n "${sender.toString()}"\n not registered in PXE.`); } } @@ -698,14 +705,22 @@ export class PXE { nodeRPCCalls: contractFunctionSimulator?.getStats().nodeRPCCalls, }); + // While not strictly necessary to store tagging cache contents in the DB since we sync tagging indexes from + // chain before sending new logs, the sync can only see logs already included in blocks. If we send another + // transaction before this one is included in a block from this PXE, and that transaction contains a log with + // a tag derived from the same secret, we would reuse the tag and the transactions would be linked. Hence + // storing the tags here prevents linkage of txs sent from the same PXE. const preTagsUsedInTheTx = privateExecutionResult.entrypoint.preTags; if (preTagsUsedInTheTx.length > 0) { - await this.taggingDataProvider.setLastUsedIndexesAsSender(preTagsUsedInTheTx); - this.log.debug(`Stored used pre tags as sender for the tx`, { + // TODO(benesjan): The following is an expensive operation. Figure out a way to avoid it. 
+ const txHash = (await txProvingResult.toTx()).txHash; + + await this.senderTaggingDataProvider.storePendingIndexes(preTagsUsedInTheTx, txHash); + this.log.debug(`Stored used pre-tags as sender for the tx`, { preTagsUsedInTheTx, }); } else { - this.log.debug(`No pre tags used in the tx`); + this.log.debug(`No pre-tags used in the tx`); } return txProvingResult; diff --git a/yarn-project/pxe/src/storage/tagging_data_provider/index.ts b/yarn-project/pxe/src/storage/tagging_data_provider/index.ts index 42085f6867aa..22ad7b18f2dc 100644 --- a/yarn-project/pxe/src/storage/tagging_data_provider/index.ts +++ b/yarn-project/pxe/src/storage/tagging_data_provider/index.ts @@ -1 +1,2 @@ -export { TaggingDataProvider } from './tagging_data_provider.js'; +export { SenderTaggingDataProvider } from './sender_tagging_data_provider.js'; +export { RecipientTaggingDataProvider } from './recipient_tagging_data_provider.js'; diff --git a/yarn-project/pxe/src/storage/tagging_data_provider/recipient_tagging_data_provider.ts b/yarn-project/pxe/src/storage/tagging_data_provider/recipient_tagging_data_provider.ts new file mode 100644 index 000000000000..ac291a718dcb --- /dev/null +++ b/yarn-project/pxe/src/storage/tagging_data_provider/recipient_tagging_data_provider.ts @@ -0,0 +1,86 @@ +import { toArray } from '@aztec/foundation/iterable'; +import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; +import type { DirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; + +/** + * Data provider of tagging data used when syncing the logs as a recipient. The sender counterpart of this class is + * called SenderTaggingDataProvider. We have the providers separate for the sender and recipient because + * the algorithms are completely disjoint and there is not data reuse between the 2. 
+ */ +export class RecipientTaggingDataProvider { + #store: AztecAsyncKVStore; + #addressBook: AztecAsyncMap; + + // Stores the last used index for each directional app tagging secret. + #lastUsedIndexes: AztecAsyncMap; + + constructor(store: AztecAsyncKVStore) { + this.#store = store; + + this.#addressBook = this.#store.openMap('address_book'); + this.#lastUsedIndexes = this.#store.openMap('last_used_indexes'); + } + + /** + * Sets the last used indexes when looking for logs. + * @param preTags - The pre-tags containing the directional app tagging secrets and the indexes that are to be + * updated in the db. + * @throws If any two pre-tags contain the same directional app tagging secret + */ + setLastUsedIndexes(preTags: PreTag[]) { + // Non-unique secrets would indicate a bug in the caller function. + const secretsSet = new Set(preTags.map(preTag => preTag.secret.toString())); + if (secretsSet.size !== preTags.length) { + throw new Error(`Duplicate secrets found when setting last used indexes`); + } + + return Promise.all(preTags.map(({ secret, index }) => this.#lastUsedIndexes.set(secret.toString(), index))); + } + + /** + * Returns the last used indexes when looking for logs. + * @param secrets - The directional app tagging secrets to obtain the indexes for. + * @returns The last used indexes for the given directional app tagging secrets, or undefined if have never yet found + * a log for a given secret. 
+ */ + getLastUsedIndexes(secrets: DirectionalAppTaggingSecret[]): Promise<(number | undefined)[]> { + return Promise.all(secrets.map(secret => this.#lastUsedIndexes.getAsync(secret.toString()))); + } + + resetNoteSyncData(): Promise { + return this.#store.transactionAsync(async () => { + const keys = await toArray(this.#lastUsedIndexes.keysAsync()); + await Promise.all(keys.map(secret => this.#lastUsedIndexes.delete(secret))); + }); + } + + // It might seem weird that the following 3 methods are in RecipientTaggingDataProvider and not + // in SenderTaggingDataProvider but that is because this data is truly only used for the purposes of syncing logs + // as a recipient. When sending logs or when syncing sender tagging indexes we only receive directional app tagging + // secret from Aztec.nr via an oracle and we don't need to access sender addresses. + + async addSenderAddress(address: AztecAddress): Promise { + if (await this.#addressBook.hasAsync(address.toString())) { + return false; + } + + await this.#addressBook.set(address.toString(), true); + + return true; + } + + async getSenderAddresses(): Promise { + return (await toArray(this.#addressBook.keysAsync())).map(AztecAddress.fromString); + } + + async removeSenderAddress(address: AztecAddress): Promise { + if (!(await this.#addressBook.hasAsync(address.toString()))) { + return false; + } + + await this.#addressBook.delete(address.toString()); + + return true; + } +} diff --git a/yarn-project/pxe/src/storage/tagging_data_provider/sender_tagging_data_provider.test.ts b/yarn-project/pxe/src/storage/tagging_data_provider/sender_tagging_data_provider.test.ts new file mode 100644 index 000000000000..1159b5a8515e --- /dev/null +++ b/yarn-project/pxe/src/storage/tagging_data_provider/sender_tagging_data_provider.test.ts @@ -0,0 +1,485 @@ +import { Fr } from '@aztec/foundation/curves/bn254'; +import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; +import { DirectionalAppTaggingSecret, type PreTag } from 
'@aztec/stdlib/logs'; +import { TxHash } from '@aztec/stdlib/tx'; + +import { WINDOW_LEN } from '../../tagging/sync/sync_sender_tagging_indexes.js'; +import { SenderTaggingDataProvider } from './sender_tagging_data_provider.js'; + +describe('SenderTaggingDataProvider', () => { + let taggingDataProvider: SenderTaggingDataProvider; + let secret1: DirectionalAppTaggingSecret; + let secret2: DirectionalAppTaggingSecret; + + beforeEach(async () => { + taggingDataProvider = new SenderTaggingDataProvider(await openTmpStore('test')); + secret1 = DirectionalAppTaggingSecret.fromString(Fr.random().toString()); + secret2 = DirectionalAppTaggingSecret.fromString(Fr.random().toString()); + }); + + describe('storePendingIndexes', () => { + it('stores a single pending index', async () => { + const txHash = TxHash.random(); + const preTag: PreTag = { secret: secret1, index: 5 }; + + await taggingDataProvider.storePendingIndexes([preTag], txHash); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(txHash); + }); + + it('stores multiple pending indexes for different secrets', async () => { + const txHash = TxHash.random(); + const preTags: PreTag[] = [ + { secret: secret1, index: 3 }, + { secret: secret2, index: 7 }, + ]; + + await taggingDataProvider.storePendingIndexes(preTags, txHash); + + const txHashes1 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes1).toHaveLength(1); + expect(txHashes1[0]).toEqual(txHash); + + const txHashes2 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret2, 0, 10); + expect(txHashes2).toHaveLength(1); + expect(txHashes2[0]).toEqual(txHash); + }); + + it('stores multiple pending indexes for the same secret from different txs', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], 
txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash2); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes).toHaveLength(2); + expect(txHashes).toContainEqual(txHash1); + expect(txHashes).toContainEqual(txHash2); + }); + + it('ignores duplicate preTag + txHash combination', async () => { + const txHash = TxHash.random(); + const preTag: PreTag = { secret: secret1, index: 5 }; + + await taggingDataProvider.storePendingIndexes([preTag], txHash); + await taggingDataProvider.storePendingIndexes([preTag], txHash); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(txHash); + }); + + it('throws when storing duplicate secrets in the same call', async () => { + const txHash = TxHash.random(); + const preTags: PreTag[] = [ + { secret: secret1, index: 3 }, + { secret: secret1, index: 7 }, + ]; + + await expect(taggingDataProvider.storePendingIndexes(preTags, txHash)).rejects.toThrow( + 'Duplicate secrets found when storing pending indexes', + ); + }); + + it('throws when storing a different index for an existing secret + txHash pair', async () => { + const txHash = TxHash.random(); + + // First store an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash); + + // Try to store a different index for the same secret + txHash pair + await expect(taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash)).rejects.toThrow( + /Cannot store index 7.*a different index 5 already exists/, + ); + }); + + it('throws when storing a pending index lower than the last finalized index', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + // First store and finalize an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 10 }], txHash1); + await 
taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Try to store a pending index lower than the finalized index + await expect(taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash2)).rejects.toThrow( + /Cannot store pending index 5.*lower than or equal to the last finalized index 10/, + ); + }); + + it('throws when storing a pending index equal to the last finalized index', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + // First store and finalize an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 10 }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Try to store a pending index equal to the finalized index + await expect(taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 10 }], txHash2)).rejects.toThrow( + /Cannot store pending index 10.*lower than or equal to the last finalized index 10/, + ); + }); + + it('allows storing a pending index higher than the last finalized index', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + // First store and finalize an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 10 }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Store a pending index higher than the finalized index - should succeed + await expect( + taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 15 }], txHash2), + ).resolves.not.toThrow(); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 20); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(txHash2); + }); + + describe('window length validation', () => { + it('throws when storing an index beyond window length from finalized index', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const finalizedIndex = 10; + const indexBeyondWindow = finalizedIndex + 
WINDOW_LEN + 1; + + // First store and finalize an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: finalizedIndex }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Try to store an index beyond the window + await expect( + taggingDataProvider.storePendingIndexes([{ secret: secret1, index: indexBeyondWindow }], txHash2), + ).rejects.toThrow( + `Highest used index ${indexBeyondWindow} is further than window length from the highest finalized index ${finalizedIndex}`, + ); + }); + + it('allows storing an index at the window boundary from finalized index', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const finalizedIndex = 10; + const indexAtBoundary = finalizedIndex + WINDOW_LEN; + + // First store and finalize an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: finalizedIndex }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Store an index at the boundary, but check is >, so it should succeed + await expect( + taggingDataProvider.storePendingIndexes([{ secret: secret1, index: indexAtBoundary }], txHash2), + ).resolves.not.toThrow(); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, indexAtBoundary + 5); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(txHash2); + }); + }); + }); + + describe('getTxHashesOfPendingIndexes', () => { + it('returns empty array when no pending indexes exist', async () => { + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes).toEqual([]); + }); + + it('returns tx hashes for indexes within the specified range', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const txHash3 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await 
taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash2); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 8 }], txHash3); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 4, 9); + expect(txHashes).toHaveLength(2); + expect(txHashes).toContainEqual(txHash2); + expect(txHashes).toContainEqual(txHash3); + expect(txHashes).not.toContainEqual(txHash1); + }); + + it('includes startIndex and excludes endIndex (range is [startIndex, endIndex))', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 10 }], txHash2); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 5, 10); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(txHash1); + }); + + it('handles parallel pending indexes for the same secret from different txs', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const txHash3 = TxHash.random(); + const txHash4 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash2); + // We store different secret with txHash1 to check we correctly don't return it in the result + await taggingDataProvider.storePendingIndexes([{ secret: secret2, index: 7 }], txHash1); + // Store "parallel" index for secret1 with a different tx (can happen when sending logs from multiple PXEs) + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash3); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash4); + + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + // Should have 3 unique tx hashes 
for secret1 + expect(txHashes).toEqual(expect.arrayContaining([txHash1, txHash2, txHash3, txHash4])); + }); + }); + + describe('getLastFinalizedIndex', () => { + it('returns undefined when no finalized index exists', async () => { + const lastFinalized = await taggingDataProvider.getLastFinalizedIndex(secret1); + expect(lastFinalized).toBeUndefined(); + }); + + it('returns the last finalized index after finalizePendingIndexes', async () => { + const txHash = TxHash.random(); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash); + await taggingDataProvider.finalizePendingIndexes([txHash]); + + const lastFinalized = await taggingDataProvider.getLastFinalizedIndex(secret1); + expect(lastFinalized).toBe(5); + }); + }); + + describe('getLastUsedIndex', () => { + it('returns undefined when no indexes exist', async () => { + const lastUsed = await taggingDataProvider.getLastUsedIndex(secret1); + expect(lastUsed).toBeUndefined(); + }); + + it('returns the last finalized index when no pending indexes exist', async () => { + const txHash = TxHash.random(); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash); + await taggingDataProvider.finalizePendingIndexes([txHash]); + + const lastUsed = await taggingDataProvider.getLastUsedIndex(secret1); + expect(lastUsed).toBe(5); + }); + + it('returns the highest pending index when pending indexes exist', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + // First, finalize an index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Then add a higher pending index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash2); + + const lastUsed = await taggingDataProvider.getLastUsedIndex(secret1); + expect(lastUsed).toBe(7); + }); + + it('returns the highest of multiple pending indexes', 
async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const txHash3 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash2); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash3); + + const lastUsed = await taggingDataProvider.getLastUsedIndex(secret1); + expect(lastUsed).toBe(7); + }); + }); + + describe('dropPendingIndexes', () => { + it('removes all pending indexes for a given tx hash', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret2, index: 5 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash2); + + await taggingDataProvider.dropPendingIndexes([txHash1]); + + // txHash1 should be removed + const txHashes1 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes1).toHaveLength(1); + expect(txHashes1[0]).toEqual(txHash2); + + // txHash1 should also be removed from secret2 + const txHashes2 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret2, 0, 10); + expect(txHashes2).toEqual([]); + }); + }); + + describe('finalizePendingIndexes', () => { + it('moves pending index to finalized for a given tx hash', async () => { + const txHash = TxHash.random(); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash); + + await taggingDataProvider.finalizePendingIndexes([txHash]); + + const lastFinalized = await taggingDataProvider.getLastFinalizedIndex(secret1); + expect(lastFinalized).toBe(5); + + // Pending index should be removed + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + 
expect(txHashes).toEqual([]); + }); + + it('updates finalized index to the higher value', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash2); + await taggingDataProvider.finalizePendingIndexes([txHash2]); + + const lastFinalized = await taggingDataProvider.getLastFinalizedIndex(secret1); + expect(lastFinalized).toBe(7); + }); + + it('does not update finalized index when newly finalized index is lower', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + // Store both pending indexes first + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash2); + + // Finalize the higher index first + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Then try to finalize the lower index + await taggingDataProvider.finalizePendingIndexes([txHash2]); + + const lastFinalized = await taggingDataProvider.getLastFinalizedIndex(secret1); + expect(lastFinalized).toBe(7); // Should remain at 7 + }); + + it('prunes pending indexes with lower or equal index than finalized', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const txHash3 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash2); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash3); + + // Finalize txHash2 (index 5) + await taggingDataProvider.finalizePendingIndexes([txHash2]); + + // txHash1 (index 3) should be pruned as it's lower than finalized + // txHash3 
(index 7) should remain + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes).toHaveLength(1); + expect(txHashes[0]).toEqual(txHash3); + }); + + it('handles multiple secrets in the same tx', async () => { + const txHash = TxHash.random(); + await taggingDataProvider.storePendingIndexes( + [ + { secret: secret1, index: 3 }, + { secret: secret2, index: 7 }, + ], + txHash, + ); + + await taggingDataProvider.finalizePendingIndexes([txHash]); + + const lastFinalized1 = await taggingDataProvider.getLastFinalizedIndex(secret1); + const lastFinalized2 = await taggingDataProvider.getLastFinalizedIndex(secret2); + + expect(lastFinalized1).toBe(3); + expect(lastFinalized2).toBe(7); + }); + + it('does nothing when tx hash does not exist', async () => { + const txHash = TxHash.random(); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash); + + await taggingDataProvider.finalizePendingIndexes([TxHash.random()]); + + // Original pending index should still be there + const txHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret1, 0, 10); + expect(txHashes).toHaveLength(1); + + // Finalized index should not be set + const lastFinalized = await taggingDataProvider.getLastFinalizedIndex(secret1); + expect(lastFinalized).toBeUndefined(); + }); + }); + + describe('complex scenarios', () => { + it('handles a full lifecycle: pending -> finalized -> new pending', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + // Step 1: Add pending index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(3); + expect(await taggingDataProvider.getLastFinalizedIndex(secret1)).toBeUndefined(); + + // Step 2: Finalize the index + await taggingDataProvider.finalizePendingIndexes([txHash1]); + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(3); 
+ expect(await taggingDataProvider.getLastFinalizedIndex(secret1)).toBe(3); + + // Step 3: Add a new higher pending index + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash2); + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(7); + expect(await taggingDataProvider.getLastFinalizedIndex(secret1)).toBe(3); + + // Step 4: Finalize the new index + await taggingDataProvider.finalizePendingIndexes([txHash2]); + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(7); + expect(await taggingDataProvider.getLastFinalizedIndex(secret1)).toBe(7); + }); + + it('handles dropped transactions', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 5 }], txHash2); + + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(5); + + // Drop txHash2 + await taggingDataProvider.dropPendingIndexes([txHash2]); + + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(3); + }); + + it('handles multiple secrets with different lifecycles', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const txHash3 = TxHash.random(); + + // Secret1: pending -> finalized + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 3 }], txHash1); + await taggingDataProvider.finalizePendingIndexes([txHash1]); + + // Secret2: pending (not finalized) + await taggingDataProvider.storePendingIndexes([{ secret: secret2, index: 5 }], txHash2); + + // Secret1: new pending + await taggingDataProvider.storePendingIndexes([{ secret: secret1, index: 7 }], txHash3); + + expect(await taggingDataProvider.getLastFinalizedIndex(secret1)).toBe(3); + expect(await taggingDataProvider.getLastUsedIndex(secret1)).toBe(7); + expect(await 
taggingDataProvider.getLastFinalizedIndex(secret2)).toBeUndefined(); + expect(await taggingDataProvider.getLastUsedIndex(secret2)).toBe(5); + }); + }); +}); diff --git a/yarn-project/pxe/src/storage/tagging_data_provider/sender_tagging_data_provider.ts b/yarn-project/pxe/src/storage/tagging_data_provider/sender_tagging_data_provider.ts new file mode 100644 index 000000000000..180a7b710681 --- /dev/null +++ b/yarn-project/pxe/src/storage/tagging_data_provider/sender_tagging_data_provider.ts @@ -0,0 +1,244 @@ +import { toArray } from '@aztec/foundation/iterable'; +import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; +import type { DirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; +import { TxHash } from '@aztec/stdlib/tx'; + +import { WINDOW_LEN as SENDER_TAGGING_INDEXES_SYNC_WINDOW_LEN } from '../../tagging/sync/sync_sender_tagging_indexes.js'; + +/** + * Data provider of tagging data used when syncing the sender tagging indexes. The recipient counterpart of this class + * is called RecipientTaggingDataProvider. We have the providers separate for the sender and recipient because + * the algorithms are completely disjoint and there is no data reuse between the two. + */ +export class SenderTaggingDataProvider { + #store: AztecAsyncKVStore; + + // Stores the pending indexes for each directional app tagging secret. Pending here means that the tx that contained + // the private logs with tags corresponding to these indexes has not been finalized yet. + // + // We don't store just the highest index because if their transaction is dropped we'd then need the information about + // the lower pending indexes. For each secret-tx pair however we only store the largest index used in that tx, since + // the smaller ones are irrelevant due to tx atomicity. + // + // TODO(#17615): This assumes no logs are used in the non-revertible phase. 
+ #pendingIndexes: AztecAsyncMap; + + // Stores the last (highest) finalized index for each directional app tagging secret. We care only about the last + // index because unlike the pending indexes, it will never happen that a finalized index would be removed and hence + // we don't need to store the history. + #lastFinalizedIndexes: AztecAsyncMap; + + constructor(store: AztecAsyncKVStore) { + this.#store = store; + + this.#pendingIndexes = this.#store.openMap('pending_indexes'); + this.#lastFinalizedIndexes = this.#store.openMap('last_finalized_indexes'); + } + + /** + * Stores pending indexes. + * @remarks Ignores the index if the same preTag + txHash combination already exists in the db with the same index. + * This is expected to happen because whenever we start sync we start from the last finalized index and we can have + * pending indexes already stored from previous syncs. + * @param preTags - The pre-tags containing the directional app tagging secrets and the indexes that are to be + * stored in the db. + * @param txHash - The tx in which the pretags were used in private logs. + * @throws If any two pre-tags contain the same directional app tagging secret. This is enforced because we care + * only about the highest index for a given secret that was used in the tx. Hence this check is a good way to catch + * bugs. + * @throws If the newly stored pending index is further than window length from the highest finalized index for the + * same secret. This is enforced in order to give a guarantee to a recipient that he doesn't need to look further than + * window length ahead of the highest finalized index. + * @throws If a secret + txHash pair already exists in the db with a different index value. It should never happen + * that we would attempt to store a different index for a given secret-txHash pair because we always store just the + * highest index for a given secret-txHash pair. Hence this is a good way to catch bugs. 
+ * @throws If the newly stored pending index is lower than or equal to the last finalized index for the same secret. + * This is enforced because this should never happen if the syncing is done correctly as we look for logs from higher + * indexes than finalized ones. + */ + async storePendingIndexes(preTags: PreTag[], txHash: TxHash) { + // The secrets in pre-tags should be unique because we always store just the highest index per given secret-txHash + // pair. Below we check that this is the case. + const secretsSet = new Set(preTags.map(preTag => preTag.secret.toString())); + if (secretsSet.size !== preTags.length) { + throw new Error(`Duplicate secrets found when storing pending indexes`); + } + + for (const { secret, index } of preTags) { + // First we check that for any secret the highest used index in tx is not further than window length from + // the highest finalized index. + const finalizedIndex = (await this.getLastFinalizedIndex(secret)) ?? 0; + if (index > finalizedIndex + SENDER_TAGGING_INDEXES_SYNC_WINDOW_LEN) { + throw new Error( + `Highest used index ${index} is further than window length from the highest finalized index ${finalizedIndex}. + Tagging window length ${SENDER_TAGGING_INDEXES_SYNC_WINDOW_LEN} is configured too low. Contact the Aztec team + to increase it!`, + ); + } + + // Throw if the new pending index is lower than or equal to the last finalized index + const secretStr = secret.toString(); + const lastFinalizedIndex = await this.#lastFinalizedIndexes.getAsync(secretStr); + if (lastFinalizedIndex !== undefined && index <= lastFinalizedIndex) { + throw new Error( + `Cannot store pending index ${index} for secret ${secretStr}: ` + + `it is lower than or equal to the last finalized index ${lastFinalizedIndex}`, + ); + } + + // Check if this secret + txHash combination already exists + const txHashStr = txHash.toString(); + const existingForSecret = (await this.#pendingIndexes.getAsync(secretStr)) ?? 
[]; + const existingForSecretAndTx = existingForSecret.find(entry => entry.txHash === txHashStr); + + if (existingForSecretAndTx) { + // If it exists with a different index, throw an error + if (existingForSecretAndTx.index !== index) { + throw new Error( + `Cannot store index ${index} for secret ${secretStr} and txHash ${txHashStr}: ` + + `a different index ${existingForSecretAndTx.index} already exists for this secret-txHash pair`, + ); + } + // If it exists with the same index, ignore the update (no-op) + } else { + // If it doesn't exist, add it + await this.#pendingIndexes.set(secretStr, [...existingForSecret, { index, txHash: txHashStr }]); + } + } + } + + /** + * Returns the transaction hashes of all pending transactions that contain indexes within a specified range + * for a given directional app tagging secret. + * @param secret - The directional app tagging secret to query pending indexes for. + * @param startIndex - The lower bound of the index range (inclusive). + * @param endIndex - The upper bound of the index range (exclusive). + * @returns An array of unique transaction hashes for pending transactions that contain indexes in the range + * [startIndex, endIndex). Returns an empty array if no pending indexes exist in the range. + */ + async getTxHashesOfPendingIndexes( + secret: DirectionalAppTaggingSecret, + startIndex: number, + endIndex: number, + ): Promise { + const existing = (await this.#pendingIndexes.getAsync(secret.toString())) ?? []; + const txHashes = existing + .filter(entry => entry.index >= startIndex && entry.index < endIndex) + .map(entry => entry.txHash); + return Array.from(new Set(txHashes)).map(TxHash.fromString); + } + + /** + * Returns the last (highest) finalized index for a given secret. + * @param secret - The secret to get the last finalized index for. + * @returns The last (highest) finalized index for the given secret. 
+ */ + getLastFinalizedIndex(secret: DirectionalAppTaggingSecret): Promise { + return this.#lastFinalizedIndexes.getAsync(secret.toString()); + } + + /** + * Returns the last used index for a given directional app tagging secret, considering both finalized and pending + * indexes. + * @param secret - The directional app tagging secret to query the last used index for. + * @returns The last used index. + */ + async getLastUsedIndex(secret: DirectionalAppTaggingSecret): Promise { + const secretStr = secret.toString(); + const pendingTxScopedIndexes = (await this.#pendingIndexes.getAsync(secretStr)) ?? []; + const pendingIndexes = pendingTxScopedIndexes.map(entry => entry.index); + + if (pendingTxScopedIndexes.length === 0) { + return this.#lastFinalizedIndexes.getAsync(secretStr); + } + + // As the last used index we return the highest one from the pending indexes. Note that this value will always be + // higher than the last finalized index because we prune lower pending indexes when a tx is finalized. + return Math.max(...pendingIndexes); + } + + /** + * Drops all pending indexes corresponding to the given transaction hashes. 
+ */ + async dropPendingIndexes(txHashes: TxHash[]) { + if (txHashes.length === 0) { + return; + } + + const txHashStrs = new Set(txHashes.map(txHash => txHash.toString())); + const allSecrets = await toArray(this.#pendingIndexes.keysAsync()); + + for (const secret of allSecrets) { + const pendingData = await this.#pendingIndexes.getAsync(secret); + if (pendingData) { + const filtered = pendingData.filter(item => !txHashStrs.has(item.txHash)); + if (filtered.length === 0) { + await this.#pendingIndexes.delete(secret); + } else if (filtered.length !== pendingData.length) { + // Some items were filtered out, so update the pending data + await this.#pendingIndexes.set(secret, filtered); + } + // else: No items were filtered out (txHashes not found for this secret) --> no-op + } + } + } + + /** + * Updates pending indexes corresponding to the given transaction hashes to be finalized and prunes any lower pending + * indexes. + */ + async finalizePendingIndexes(txHashes: TxHash[]) { + if (txHashes.length === 0) { + return; + } + + for (const txHash of txHashes) { + const txHashStr = txHash.toString(); + + const allSecrets = await toArray(this.#pendingIndexes.keysAsync()); + + for (const secret of allSecrets) { + const pendingData = await this.#pendingIndexes.getAsync(secret); + if (!pendingData) { + continue; + } + + const matchingIndexes = pendingData.filter(item => item.txHash === txHashStr).map(item => item.index); + if (matchingIndexes.length === 0) { + continue; + } + + if (matchingIndexes.length > 1) { + // We should always just store the highest pending index for a given tx hash and secret because the lower + // values are irrelevant. + throw new Error(`Multiple pending indexes found for tx hash ${txHashStr} and secret ${secret}`); + } + + let lastFinalized = await this.#lastFinalizedIndexes.getAsync(secret); + const newFinalized = matchingIndexes[0]; + + if (newFinalized < (lastFinalized ?? 
0)) { + // This should never happen because when last finalized index was finalized we should have pruned the lower + // pending indexes. + throw new Error( + `New finalized index ${newFinalized} is smaller than the current last finalized index ${lastFinalized}`, + ); + } + + await this.#lastFinalizedIndexes.set(secret, newFinalized); + lastFinalized = newFinalized; + + // When we add pending indexes, we ensure they are higher than the last finalized index. However, because we + // cannot control the order in which transactions are finalized, there may be pending indexes that are now + // obsolete because they are lower than the most recently finalized index. For this reason, we prune these + // outdated pending indexes. + const remainingItemsOfHigherIndex = pendingData.filter(item => item.index > (lastFinalized ?? 0)); + if (remainingItemsOfHigherIndex.length === 0) { + await this.#pendingIndexes.delete(secret); + } else { + await this.#pendingIndexes.set(secret, remainingItemsOfHigherIndex); + } + } + } + } +} diff --git a/yarn-project/pxe/src/storage/tagging_data_provider/tagging_data_provider.ts b/yarn-project/pxe/src/storage/tagging_data_provider/tagging_data_provider.ts deleted file mode 100644 index fe54c8444847..000000000000 --- a/yarn-project/pxe/src/storage/tagging_data_provider/tagging_data_provider.ts +++ /dev/null @@ -1,120 +0,0 @@ -import { toArray } from '@aztec/foundation/iterable'; -import type { AztecAsyncKVStore, AztecAsyncMap } from '@aztec/kv-store'; -import { AztecAddress } from '@aztec/stdlib/aztec-address'; -import type { DirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; - -export class TaggingDataProvider { - #store: AztecAsyncKVStore; - #addressBook: AztecAsyncMap; - - // Stores the last used index for each directional app tagging secret. Taking into account whether we are - // requesting the index as a sender or as a recipient because the sender and recipient can be in the same PXE. 
- #lastUsedIndexesAsSenders: AztecAsyncMap; - #lastUsedIndexesAsRecipients: AztecAsyncMap; - - constructor(store: AztecAsyncKVStore) { - this.#store = store; - - this.#addressBook = this.#store.openMap('address_book'); - - this.#lastUsedIndexesAsSenders = this.#store.openMap('last_used_indexes_as_senders'); - this.#lastUsedIndexesAsRecipients = this.#store.openMap('last_used_indexes_as_recipients'); - } - - /** - * Sets the last used indexes when sending a log. - * @param preTags - The pre tags containing the directional app tagging secrets and the indexes that are to be - * updated in the db. - * @throws If any two pre tags contain the same directional app tagging secret - */ - setLastUsedIndexesAsSender(preTags: PreTag[]) { - this.#assertUniqueSecrets(preTags, 'sender'); - - return Promise.all( - preTags.map(({ secret, index }) => this.#lastUsedIndexesAsSenders.set(secret.toString(), index)), - ); - } - - /** - * Sets the last used indexes when looking for logs. - * @param preTags - The pre tags containing the directional app tagging secrets and the indexes that are to be - * updated in the db. - * @throws If any two pre tags contain the same directional app tagging secret - */ - setLastUsedIndexesAsRecipient(preTags: PreTag[]) { - this.#assertUniqueSecrets(preTags, 'recipient'); - - return Promise.all( - preTags.map(({ secret, index }) => this.#lastUsedIndexesAsRecipients.set(secret.toString(), index)), - ); - } - - // It should never happen that we would receive any two pre tags on the input containing the same directional app - // tagging secret as everywhere we always just apply the largest index. Hence this check is a good way to catch - // bugs. 
- #assertUniqueSecrets(preTags: PreTag[], role: 'sender' | 'recipient'): void { - const secretStrings = preTags.map(({ secret }) => secret.toString()); - const uniqueSecrets = new Set(secretStrings); - if (uniqueSecrets.size !== secretStrings.length) { - throw new Error(`Duplicate secrets found when setting last used indexes as ${role}`); - } - } - - /** - * Returns the last used index when sending a log with a given secret. - * @param secret - The directional app tagging secret. - * @returns The last used index for the given directional app tagging secret, or undefined if not found. - */ - async getLastUsedIndexesAsSender(secret: DirectionalAppTaggingSecret): Promise { - return await this.#lastUsedIndexesAsSenders.getAsync(secret.toString()); - } - - /** - * Returns the last used indexes when looking for logs as a recipient. - * @param secrets - The directional app tagging secrets to obtain the indexes for. - * @returns The last used indexes for the given directional app tagging secrets, or undefined if have never yet found - * a log for a given secret. 
- */ - getLastUsedIndexesAsRecipient(secrets: DirectionalAppTaggingSecret[]): Promise<(number | undefined)[]> { - return Promise.all(secrets.map(secret => this.#lastUsedIndexesAsRecipients.getAsync(secret.toString()))); - } - - resetNoteSyncData(): Promise { - return this.#store.transactionAsync(async () => { - const keysForSenders = await toArray(this.#lastUsedIndexesAsSenders.keysAsync()); - await Promise.all(keysForSenders.map(secret => this.#lastUsedIndexesAsSenders.delete(secret))); - const keysForRecipients = await toArray(this.#lastUsedIndexesAsRecipients.keysAsync()); - await Promise.all(keysForRecipients.map(secret => this.#lastUsedIndexesAsRecipients.delete(secret))); - }); - } - - async addSenderAddress(address: AztecAddress): Promise { - if (await this.#addressBook.hasAsync(address.toString())) { - return false; - } - - await this.#addressBook.set(address.toString(), true); - - return true; - } - - async getSenderAddresses(): Promise { - return (await toArray(this.#addressBook.keysAsync())).map(AztecAddress.fromString); - } - - async removeSenderAddress(address: AztecAddress): Promise { - if (!(await this.#addressBook.hasAsync(address.toString()))) { - return false; - } - - await this.#addressBook.delete(address.toString()); - - return true; - } - - async getSize() { - const addressesCount = (await toArray(this.#addressBook.keysAsync())).length; - // All keys are addresses - return 3 * addressesCount * AztecAddress.SIZE_IN_BYTES; - } -} diff --git a/yarn-project/pxe/src/tagging/constants.ts b/yarn-project/pxe/src/tagging/constants.ts index 776d2cafbab7..ba547f894e48 100644 --- a/yarn-project/pxe/src/tagging/constants.ts +++ b/yarn-project/pxe/src/tagging/constants.ts @@ -1,2 +1,3 @@ // Half the size of the window we slide over the tagging indexes. +// TODO(#17775): Move this to the recipient log sync function once it's implemented in this directory. 
export const WINDOW_HALF_SIZE = 10; diff --git a/yarn-project/pxe/src/tagging/sync/sync_sender_tagging_indexes.test.ts b/yarn-project/pxe/src/tagging/sync/sync_sender_tagging_indexes.test.ts new file mode 100644 index 000000000000..6f207bd6b7da --- /dev/null +++ b/yarn-project/pxe/src/tagging/sync/sync_sender_tagging_indexes.test.ts @@ -0,0 +1,249 @@ +import { BlockNumber } from '@aztec/foundation/branded-types'; +import { Fr } from '@aztec/foundation/curves/bn254'; +import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; +import { L2BlockHash } from '@aztec/stdlib/block'; +import type { AztecNode } from '@aztec/stdlib/interfaces/client'; +import { PrivateLog, TxScopedL2Log } from '@aztec/stdlib/logs'; +import { TxHash, TxStatus } from '@aztec/stdlib/tx'; + +import { type MockProxy, mock } from 'jest-mock-extended'; + +import { SenderTaggingDataProvider } from '../../storage/tagging_data_provider/sender_tagging_data_provider.js'; +import { DirectionalAppTaggingSecret, SiloedTag, Tag } from '../index.js'; +import { WINDOW_LEN, syncSenderTaggingIndexes } from './sync_sender_tagging_indexes.js'; + +describe('syncSenderTaggingIndexes', () => { + // Contract address and secret to be used on the input of the syncSenderTaggingIndexes function. 
+ let secret: DirectionalAppTaggingSecret; + let contractAddress: AztecAddress; + + let aztecNode: MockProxy; + let taggingDataProvider: SenderTaggingDataProvider; + + async function computeSiloedTagForIndex(index: number) { + const tag = await Tag.compute({ secret, index }); + return SiloedTag.compute(tag, contractAddress); + } + + function makeLog(txHash: TxHash, tag: Fr) { + return new TxScopedL2Log(txHash, 0, 0, BlockNumber(0), L2BlockHash.random(), PrivateLog.random(tag)); + } + + async function setUp() { + secret = DirectionalAppTaggingSecret.fromString(Fr.random().toString()); + contractAddress = await AztecAddress.random(); + + aztecNode = mock(); + taggingDataProvider = new SenderTaggingDataProvider(await openTmpStore('test')); + } + + it('no new logs found for a given secret', async () => { + await setUp(); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + // No log found for any tag + return Promise.resolve(tags.map((_tag: Fr) => [])); + }); + + await syncSenderTaggingIndexes(secret, contractAddress, aztecNode, taggingDataProvider); + + // Highest used and finalized indexes should stay undefined + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBeUndefined(); + expect(await taggingDataProvider.getLastFinalizedIndex(secret)).toBeUndefined(); + }); + + // These tests need to be run together in sequence. + describe('sequential tests', () => { + const finalizedIndexStep1 = 3; + const finalizedBlockNumberStep1 = 15; + + const pendingTxHashStep2 = TxHash.random(); + const pendingIndexStep2 = 5; + + beforeAll(async () => { + await setUp(); + }); + + it('step 1: highest finalized index is updated', async () => { + // Create a log with tag index 3 + const index3Tag = await computeSiloedTagForIndex(finalizedIndexStep1); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + // Return empty arrays for all tags except the one at index 3 + return Promise.resolve( + tags.map((tag: Fr) => (tag.equals(index3Tag.value) ? 
[makeLog(TxHash.random(), index3Tag.value)] : [])), + ); + }); + + // Mock getTxReceipt to return a successful, finalized tx (finalized because it is included in a block before + // the finalized block) + aztecNode.getTxReceipt.mockResolvedValue({ + status: TxStatus.SUCCESS, + blockNumber: finalizedBlockNumberStep1 - 1, + } as any); + + // Mock getL2Tips to return a finalized block number >= the tx block number + aztecNode.getL2Tips.mockResolvedValue({ + finalized: { number: finalizedBlockNumberStep1 }, + } as any); + + await syncSenderTaggingIndexes(secret, contractAddress, aztecNode, taggingDataProvider); + + // Verify the highest finalized index is updated to 3 + expect(await taggingDataProvider.getLastFinalizedIndex(secret)).toBe(finalizedIndexStep1); + // Verify the highest used index also returns 3 (when there is no higher pending index the highest used index is + // the highest finalized index). + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(finalizedIndexStep1); + }); + + it('step 2: pending log is synced', async () => { + const pendingTag = await computeSiloedTagForIndex(pendingIndexStep2); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + // Return empty arrays for all tags except the one at the pending index + return Promise.resolve( + tags.map((tag: Fr) => (tag.equals(pendingTag.value) ? 
[makeLog(pendingTxHashStep2, pendingTag.value)] : [])), + ); + }); + + // Mock getTxReceipt to return a successful but still pending tx + aztecNode.getTxReceipt.mockResolvedValue({ + status: TxStatus.SUCCESS, + blockNumber: finalizedBlockNumberStep1 + 1, + } as any); + + aztecNode.getL2Tips.mockResolvedValue({ + finalized: { number: finalizedBlockNumberStep1 }, + } as any); + + await syncSenderTaggingIndexes(secret, contractAddress, aztecNode, taggingDataProvider); + + // Verify the highest finalized index was not updated + expect(await taggingDataProvider.getLastFinalizedIndex(secret)).toBe(finalizedIndexStep1); + // Verify the highest used index was updated to the pending index + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(pendingIndexStep2); + }); + + it('step 3: syncs logs across 2 windows', async () => { + // Move finalized block into the future + const newFinalizedBlockNumber = finalizedBlockNumberStep1 + 5; + const newHighestFinalizedIndex = finalizedIndexStep1 + 4; + const newHighestUsedIndex = newHighestFinalizedIndex + WINDOW_LEN; + + // Create tx hashes for new logs + const newHighestFinalizedTxHash = TxHash.random(); + const newHighestUsedTxHash = TxHash.random(); + + // Create tags for multiple indices across 2 windows + const nowFinalizedTag = await computeSiloedTagForIndex(pendingIndexStep2); // Previously pending, now finalized + const newHighestFinalizedTag = await computeSiloedTagForIndex(newHighestFinalizedIndex); // New finalized log + const newHighestUsedTag = await computeSiloedTagForIndex(newHighestUsedIndex); // New pending log + + // Mock getLogsByTags to return logs for multiple indices + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve( + tags.map((tag: Fr) => { + if (tag.equals(nowFinalizedTag.value)) { + return [makeLog(pendingTxHashStep2, nowFinalizedTag.value)]; + } else if (tag.equals(newHighestFinalizedTag.value)) { + return [makeLog(newHighestFinalizedTxHash, 
newHighestFinalizedTag.value)]; + } else if (tag.equals(newHighestUsedTag.value)) { + return [makeLog(newHighestUsedTxHash, newHighestUsedTag.value)]; + } + return []; + }), + ); + }); + + // Mock getTxReceipt to return appropriate statuses + aztecNode.getTxReceipt.mockImplementation((hash: TxHash) => { + if (hash.equals(pendingTxHashStep2)) { + // The previously pending tx (index pendingIndexStep2) is now finalized + return { + status: TxStatus.SUCCESS, + blockNumber: newFinalizedBlockNumber - 3, + } as any; + } else if (hash.equals(newHighestFinalizedTxHash)) { + // This tx (index newHighestFinalizedIndex) is finalized + return { + status: TxStatus.SUCCESS, + blockNumber: newFinalizedBlockNumber - 2, + } as any; + } else if (hash.equals(newHighestUsedTxHash)) { + // This tx (index newHighestUsedIndex) is pending + return { + status: TxStatus.SUCCESS, + blockNumber: newFinalizedBlockNumber + 2, + } as any; + } else { + throw new Error(`Unexpected tx hash: ${hash.toString()}`); + } + }); + + // Mock getL2Tips with the new finalized block number + aztecNode.getL2Tips.mockResolvedValue({ + finalized: { number: newFinalizedBlockNumber }, + } as any); + + await syncSenderTaggingIndexes(secret, contractAddress, aztecNode, taggingDataProvider); + + expect(await taggingDataProvider.getLastFinalizedIndex(secret)).toBe(newHighestFinalizedIndex); + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(newHighestUsedIndex); + }); + }); + + /** + * This test verifies that when multiple logs use the same tag, we correctly bump the finalized index. With this + * test we make sure we don't accidentally ignore the duplicate log. 
+ */ + it('handles pending and finalized logs found at the same index', async () => { + await setUp(); + + const finalizedTxHash = TxHash.random(); + const pendingTxHash = TxHash.random(); + + const finalizedBlockNumber = 15; + const pendingAndFinalizedIndex = 3; + + const index3Tag = await computeSiloedTagForIndex(pendingAndFinalizedIndex); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + // Return both the pending and finalized logs for the tag at index 3 + return Promise.resolve( + tags.map((tag: Fr) => + tag.equals(index3Tag.value) + ? [makeLog(pendingTxHash, index3Tag.value), makeLog(finalizedTxHash, index3Tag.value)] + : [], + ), + ); + }); + + aztecNode.getTxReceipt.mockImplementation((hash: TxHash) => { + if (hash.equals(finalizedTxHash)) { + return { + status: TxStatus.SUCCESS, + blockNumber: finalizedBlockNumber - 1, // Finalized tx + } as any; + } else if (hash.equals(pendingTxHash)) { + return { + status: TxStatus.SUCCESS, + blockNumber: finalizedBlockNumber + 1, // Pending tx + } as any; + } else { + throw new Error(`Unexpected tx hash: ${hash.toString()}`); + } + }); + + aztecNode.getL2Tips.mockResolvedValue({ + finalized: { number: finalizedBlockNumber }, + } as any); + + // Sync tagged logs + await syncSenderTaggingIndexes(secret, contractAddress, aztecNode, taggingDataProvider); + + // Verify that both highest finalized and highest used were set to the pending and finalized index + expect(await taggingDataProvider.getLastFinalizedIndex(secret)).toBe(pendingAndFinalizedIndex); + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(pendingAndFinalizedIndex); + }); +}); diff --git a/yarn-project/pxe/src/tagging/sync/sync_sender_tagging_indexes.ts b/yarn-project/pxe/src/tagging/sync/sync_sender_tagging_indexes.ts new file mode 100644 index 000000000000..9579f65caa73 --- /dev/null +++ b/yarn-project/pxe/src/tagging/sync/sync_sender_tagging_indexes.ts @@ -0,0 +1,112 @@ +import type { AztecAddress } from 
'@aztec/stdlib/aztec-address'; +import type { AztecNode } from '@aztec/stdlib/interfaces/server'; +import type { DirectionalAppTaggingSecret } from '@aztec/stdlib/logs'; + +import type { SenderTaggingDataProvider } from '../../storage/tagging_data_provider/sender_tagging_data_provider.js'; +import { getStatusChangeOfPending } from './utils/get_status_change_of_pending.js'; +import { loadAndStoreNewTaggingIndexes } from './utils/load_and_store_new_tagging_indexes.js'; + +// This window has to be as large as the largest expected number of logs emitted in a tx for a given directional app +// tagging secret. If we get more tag indexes consumed than this window, an error is thrown in `PXE::proveTx` function. +// This is set to a larger value than MAX_PRIVATE_LOGS_PER_TX (currently 64) because there could be more than +// MAX_PRIVATE_LOGS_PER_TX indexes consumed in case the logs are squashed. This happens when the log contains a note +// and the note is nullified in the same tx. +// +// Rationale for value 95: +// - The `e2e_pending_note_hashes_contract` test's "Should handle overflowing the kernel data structures in nested +// calls" test case hits 95 tagging indexes emitted in a single transaction. This test creates and nullifies many +// notes recursively to test kernel reset circuit behavior, which causes logs to be squashed but still consume +// tagging indexes during the sync process. Since this is testing MAX_PRIVATE_LOGS_PER_TX overflow we can be +// reasonably certain that this value is large enough for standard use cases. +// - This value is below MAX_RPC_LEN (100) which is the limit for array parameters in the JSON RPC schema for +// `getLogsByTags`. Any test that would perform sync over JSON RPC (not by having access to the Aztec node instance +// directly) would error out if that maximum was hit (docs_examples.test.ts is an example of this). +export const WINDOW_LEN = 95; + +/** + * Syncs tagging indexes. 
This function needs to be called whenever a private log is being sent. + * + * @param secret - The secret that's unique for (sender, recipient, contract) tuple while the direction of + * sender -> recipient matters. + * @param app - The address of the contract that the logs are tagged for. Needs to be provided because we perform + * second round of siloing in this function which is necessary because kernels do it as well (they silo first field + * of the private log which corresponds to the tag). + * @remarks When syncing the indexes as sender we don't care about the log contents - we only care about the highest + * pending and highest finalized indexes as that guides the next index choice when sending a log. The next index choice + * is simply the highest pending index plus one (or finalized if pending is undefined). + * @dev This function looks for new indexes, adds them to pending, then it checks status of each pending index and + * updates its status accordingly. + */ +export async function syncSenderTaggingIndexes( + secret: DirectionalAppTaggingSecret, + app: AztecAddress, + aztecNode: AztecNode, + taggingDataProvider: SenderTaggingDataProvider, +): Promise { + // # Explanation of how syncing works + // + // When choosing an index, we select: highest pending index + 1 (or highest finalized index + 1 if no pending). + // If the chosen index is more than WINDOW_LEN from the highest finalized index, we throw an error. By having this + // hard limit we give a guarantee to a recipient that he doesn't need to look further than WINDOW_LEN ahead of the + // highest finalized index. + // + // This function synchronizes the finalized and pending indexes by iteratively querying the node for a window of + // indexes at a time, storing all those indexes as pending, and then checking the status of each pending index to + // update its finalization status accordingly. If we stumble upon a window with no indexes, we stop the loop. 
+ // + // Stopping at that point is safe because of the limit described above - there can never be an index that is more + // than WINDOW_LEN from the highest finalized index. + // + // # Note on performance + // Each window advance requires two queries (logs + tx status). For example, syncing indexes 0–500 with a window of + // 100 takes at least 10 round trips (5 windows × 2 queries). + + const finalizedIndex = await taggingDataProvider.getLastFinalizedIndex(secret); + + let start = finalizedIndex === undefined ? 0 : finalizedIndex + 1; + let end = start + WINDOW_LEN; + + let previousFinalizedIndex = finalizedIndex; + let newFinalizedIndex = undefined; + + while (true) { + // Load and store indexes for the current window. These indexes may already exist in the database if txs using + // them were previously sent from this PXE. Any duplicates are handled by the tagging data provider. + await loadAndStoreNewTaggingIndexes(secret, app, start, end, aztecNode, taggingDataProvider); + + // Retrieve all indexes within the current window from storage and update their status accordingly. + const pendingTxHashes = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, start, end); + if (pendingTxHashes.length === 0) { + break; + } + + const { txHashesToFinalize, txHashesToDrop } = await getStatusChangeOfPending(pendingTxHashes, aztecNode); + + await taggingDataProvider.dropPendingIndexes(txHashesToDrop); + await taggingDataProvider.finalizePendingIndexes(txHashesToFinalize); + + // We check if the finalized index has been updated. + newFinalizedIndex = await taggingDataProvider.getLastFinalizedIndex(secret); + if (previousFinalizedIndex !== newFinalizedIndex) { + // A new finalized index was found, so we'll run the loop again. For example: + // - Previous finalized index: 10 + // - New finalized index: 13 + // - Window length: 10 + // + // In the last iteration, we processed indexes 11-20. 
To avoid reprocessing the same logs, + // we'll only look at the new indexes 21-23: + // + // Previous window: [11, 12, 13, 14, 15, 16, 17, 18, 19, 20] + // New window: [21, 22, 23] + + const previousEnd = end; + // Add 1 because `end` is exclusive and the known finalized index is not included in the window. + end = newFinalizedIndex! + WINDOW_LEN + 1; + start = previousEnd; + previousFinalizedIndex = newFinalizedIndex; + } else { + // No new finalized index was found, so we don't need to process the next window. + break; + } + } +} diff --git a/yarn-project/pxe/src/tagging/sync/utils/get_status_change_of_pending.test.ts b/yarn-project/pxe/src/tagging/sync/utils/get_status_change_of_pending.test.ts new file mode 100644 index 000000000000..953c12e3a9b0 --- /dev/null +++ b/yarn-project/pxe/src/tagging/sync/utils/get_status_change_of_pending.test.ts @@ -0,0 +1,101 @@ +import { BlockNumber } from '@aztec/foundation/branded-types'; +import type { AztecNode } from '@aztec/stdlib/interfaces/server'; +import { TxHash, TxStatus } from '@aztec/stdlib/tx'; + +import { type MockProxy, mock } from 'jest-mock-extended'; + +import { getStatusChangeOfPending } from './get_status_change_of_pending.js'; + +describe('getStatusChangeOfPending', () => { + let aztecNode: MockProxy; + + beforeEach(() => { + aztecNode = mock(); + }); + + it('handles mixed scenarios with multiple transaction hashes', async () => { + const finalizedBlockNumber = 10; + + const finalizedTxHash = TxHash.random(); + const droppedTxHash = TxHash.random(); + const pendingTxHash = TxHash.random(); + const appLogicRevertedTxHash = TxHash.random(); + const teardownRevertedTxHash = TxHash.random(); + const bothRevertedTxHash = TxHash.random(); + + aztecNode.getTxReceipt.mockImplementation((hash: TxHash) => { + if (hash.equals(finalizedTxHash)) { + return Promise.resolve({ + status: TxStatus.SUCCESS, + blockNumber: BlockNumber(finalizedBlockNumber - 1), + } as any); + } else if (hash.equals(droppedTxHash)) { + return 
Promise.resolve({ + status: TxStatus.DROPPED, + } as any); + } else if (hash.equals(pendingTxHash)) { + return Promise.resolve({ + status: TxStatus.SUCCESS, + blockNumber: BlockNumber(finalizedBlockNumber + 1), + } as any); + } else if (hash.equals(appLogicRevertedTxHash)) { + return Promise.resolve({ + status: TxStatus.APP_LOGIC_REVERTED, + } as any); + } else if (hash.equals(teardownRevertedTxHash)) { + return Promise.resolve({ + status: TxStatus.TEARDOWN_REVERTED, + } as any); + } else if (hash.equals(bothRevertedTxHash)) { + return Promise.resolve({ + status: TxStatus.BOTH_REVERTED, + } as any); + } else { + throw new Error(`Unexpected tx hash: ${hash.toString()}`); + } + }); + + aztecNode.getL2Tips.mockResolvedValue({ + finalized: { number: BlockNumber(finalizedBlockNumber) }, + } as any); + + const result = await getStatusChangeOfPending( + [ + finalizedTxHash, + droppedTxHash, + pendingTxHash, + appLogicRevertedTxHash, + teardownRevertedTxHash, + bothRevertedTxHash, + ], + aztecNode, + ); + + expect(result.txHashesToFinalize).toEqual([finalizedTxHash]); + expect(result.txHashesToDrop).toEqual([ + droppedTxHash, + appLogicRevertedTxHash, + teardownRevertedTxHash, + bothRevertedTxHash, + ]); + }); + + it('returns txHash in txHashesToFinalize when blockNumber equals finalized block number', async () => { + const finalizedBlockNumber = 10; + const txHash = TxHash.random(); + + aztecNode.getTxReceipt.mockResolvedValue({ + status: TxStatus.SUCCESS, + blockNumber: BlockNumber(finalizedBlockNumber), + } as any); + + aztecNode.getL2Tips.mockResolvedValue({ + finalized: { number: BlockNumber(finalizedBlockNumber) }, + } as any); + + const result = await getStatusChangeOfPending([txHash], aztecNode); + + expect(result.txHashesToFinalize).toEqual([txHash]); + expect(result.txHashesToDrop).toEqual([]); + }); +}); diff --git a/yarn-project/pxe/src/tagging/sync/utils/get_status_change_of_pending.ts b/yarn-project/pxe/src/tagging/sync/utils/get_status_change_of_pending.ts 
new file mode 100644 index 000000000000..6cf934ae1861 --- /dev/null +++ b/yarn-project/pxe/src/tagging/sync/utils/get_status_change_of_pending.ts @@ -0,0 +1,44 @@ +import type { AztecNode } from '@aztec/stdlib/interfaces/server'; +import { TxHash, TxStatus } from '@aztec/stdlib/tx'; + +/** + * Based on receipts obtained from `aztecNode` returns which pending transactions changed their status to finalized or + * dropped. + */ +export async function getStatusChangeOfPending( + pending: TxHash[], + aztecNode: AztecNode, +): Promise<{ txHashesToFinalize: TxHash[]; txHashesToDrop: TxHash[] }> { + // Get receipts for all pending tx hashes and the finalized block number. + const [receipts, { finalized }] = await Promise.all([ + Promise.all(pending.map(pendingTxHash => aztecNode.getTxReceipt(pendingTxHash))), + aztecNode.getL2Tips(), + ]); + + const txHashesToFinalize: TxHash[] = []; + const txHashesToDrop: TxHash[] = []; + + for (let i = 0; i < receipts.length; i++) { + const receipt = receipts[i]; + const txHash = pending[i]; + + if (receipt.status === TxStatus.SUCCESS && receipt.blockNumber && receipt.blockNumber <= finalized.number) { + // Tx has been included in a block and the corresponding block is finalized --> we mark the indexes as + // finalized. + txHashesToFinalize.push(txHash); + } else if ( + receipt.status === TxStatus.DROPPED || + receipt.status === TxStatus.APP_LOGIC_REVERTED || + receipt.status === TxStatus.TEARDOWN_REVERTED || + receipt.status === TxStatus.BOTH_REVERTED + ) { + // Tx was dropped or reverted --> we drop the corresponding pending indexes. + // TODO(#17615): Don't drop pending indexes corresponding to non-revertible phases. + txHashesToDrop.push(txHash); + } else { + // Tx is still pending or the corresponding block is not yet finalized --> we don't do anything. 
+ } + } + + return { txHashesToFinalize, txHashesToDrop }; +} diff --git a/yarn-project/pxe/src/tagging/sync/utils/load_and_store_new_tagging_indexes.test.ts b/yarn-project/pxe/src/tagging/sync/utils/load_and_store_new_tagging_indexes.test.ts new file mode 100644 index 000000000000..b740c752abba --- /dev/null +++ b/yarn-project/pxe/src/tagging/sync/utils/load_and_store_new_tagging_indexes.test.ts @@ -0,0 +1,275 @@ +import { BlockNumber } from '@aztec/foundation/branded-types'; +import { Fr } from '@aztec/foundation/curves/bn254'; +import { openTmpStore } from '@aztec/kv-store/lmdb-v2'; +import { AztecAddress } from '@aztec/stdlib/aztec-address'; +import { L2BlockHash } from '@aztec/stdlib/block'; +import type { AztecNode } from '@aztec/stdlib/interfaces/server'; +import { DirectionalAppTaggingSecret, PrivateLog, TxScopedL2Log } from '@aztec/stdlib/logs'; +import { TxHash } from '@aztec/stdlib/tx'; + +import { type MockProxy, mock } from 'jest-mock-extended'; + +import { SenderTaggingDataProvider } from '../../../storage/tagging_data_provider/sender_tagging_data_provider.js'; +import { SiloedTag } from '../../siloed_tag.js'; +import { Tag } from '../../tag.js'; +import { loadAndStoreNewTaggingIndexes } from './load_and_store_new_tagging_indexes.js'; + +describe('loadAndStoreNewTaggingIndexes', () => { + // App contract address and secret to be used on the input of the loadAndStoreNewTaggingIndexes function. 
+ let secret: DirectionalAppTaggingSecret; + let app: AztecAddress; + + let aztecNode: MockProxy; + let taggingDataProvider: SenderTaggingDataProvider; + + async function computeSiloedTagForIndex(index: number) { + const tag = await Tag.compute({ secret, index }); + return SiloedTag.compute(tag, app); + } + + function makeLog(txHash: TxHash, tag: Fr) { + return new TxScopedL2Log(txHash, 0, 0, BlockNumber(0), L2BlockHash.random(), PrivateLog.random(tag)); + } + + beforeAll(async () => { + secret = DirectionalAppTaggingSecret.fromString(Fr.random().toString()); + app = await AztecAddress.random(); + aztecNode = mock(); + }); + + // Unlike for secret, app address and aztecNode we need a fresh instance of the tagging data provider for each test. + beforeEach(async () => { + aztecNode.getLogsByTags.mockReset(); + taggingDataProvider = new SenderTaggingDataProvider(await openTmpStore('test')); + }); + + it('no logs found for the given window', async () => { + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + // No log found for any tag + return Promise.resolve(tags.map((_tag: Fr) => [])); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, 0, 10, aztecNode, taggingDataProvider); + + // Verify that no pending indexes were stored + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBeUndefined(); + expect(await taggingDataProvider.getLastFinalizedIndex(secret)).toBeUndefined(); + + // Verify the entire window has no pending tx hashes + const txHashesInWindow = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, 0, 10); + expect(txHashesInWindow).toHaveLength(0); + }); + + it('single log found at a specific index', async () => { + const txHash = TxHash.random(); + const index = 5; + const tag = await computeSiloedTagForIndex(index); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve(tags.map((t: Fr) => (t.equals(tag.value) ? 
[makeLog(txHash, tag.value)] : []))); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, 0, 10, aztecNode, taggingDataProvider); + + // Verify that the pending index was stored for this txHash + const txHashesInRange = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, index, index + 1); + expect(txHashesInRange).toHaveLength(1); + expect(txHashesInRange[0].equals(txHash)).toBe(true); + + // Verify the last used index is correct + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(index); + }); + + it('for multiple logs with same txHash stores the highest index', async () => { + const txHash = TxHash.random(); + const index1 = 3; + const index2 = 7; + const tag1 = await computeSiloedTagForIndex(index1); + const tag2 = await computeSiloedTagForIndex(index2); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve( + tags.map((t: Fr) => { + if (t.equals(tag1.value)) { + return [makeLog(txHash, tag1.value)]; + } else if (t.equals(tag2.value)) { + return [makeLog(txHash, tag2.value)]; + } + return []; + }), + ); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, 0, 10, aztecNode, taggingDataProvider); + + // Verify that only the highest index (7) was stored for this txHash and secret + const txHashesAtIndex2 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, index2, index2 + 1); + expect(txHashesAtIndex2).toHaveLength(1); + expect(txHashesAtIndex2[0].equals(txHash)).toBe(true); + + // Verify the lower index is not stored separately + const txHashesAtIndex1 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, index1, index1 + 1); + expect(txHashesAtIndex1).toHaveLength(0); + + // Verify the last used index is the highest + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(index2); + }); + + it('multiple logs with different txHashes', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const index1 = 2; + const index2 = 6; + 
const tag1 = await computeSiloedTagForIndex(index1); + const tag2 = await computeSiloedTagForIndex(index2); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve( + tags.map((t: Fr) => { + if (t.equals(tag1.value)) { + return [makeLog(txHash1, tag1.value)]; + } else if (t.equals(tag2.value)) { + return [makeLog(txHash2, tag2.value)]; + } + return []; + }), + ); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, 0, 10, aztecNode, taggingDataProvider); + + // Verify that both txHashes have their respective indexes stored + const txHashesAtIndex1 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, index1, index1 + 1); + expect(txHashesAtIndex1).toHaveLength(1); + expect(txHashesAtIndex1[0].equals(txHash1)).toBe(true); + + const txHashesAtIndex2 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, index2, index2 + 1); + expect(txHashesAtIndex2).toHaveLength(1); + expect(txHashesAtIndex2[0].equals(txHash2)).toBe(true); + + // Verify the last used index is the highest + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(index2); + }); + + // Expected to happen if sending logs from multiple PXEs at a similar time. + it('multiple logs at the same index', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const index = 4; + const tag = await computeSiloedTagForIndex(index); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve( + tags.map((t: Fr) => (t.equals(tag.value) ? 
[makeLog(txHash1, tag.value), makeLog(txHash2, tag.value)] : [])), + ); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, 0, 10, aztecNode, taggingDataProvider); + + // Verify that both txHashes have the same index stored + const txHashesAtIndex = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, index, index + 1); + expect(txHashesAtIndex).toHaveLength(2); + const txHashStrings = txHashesAtIndex.map(h => h.toString()); + expect(txHashStrings).toContain(txHash1.toString()); + expect(txHashStrings).toContain(txHash2.toString()); + + // Verify the last used index is correct + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(index); + }); + + it('complex scenario: multiple txHashes with multiple indexes', async () => { + const txHash1 = TxHash.random(); + const txHash2 = TxHash.random(); + const txHash3 = TxHash.random(); + + // txHash1 has logs at index 1 and 8 (should store 8) + // txHash2 has logs at index 3 and 5 (should store 5) + // txHash3 has a log at index 9 (should store 9) + const tag1 = await computeSiloedTagForIndex(1); + const tag3 = await computeSiloedTagForIndex(3); + const tag5 = await computeSiloedTagForIndex(5); + const tag8 = await computeSiloedTagForIndex(8); + const tag9 = await computeSiloedTagForIndex(9); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve( + tags.map((t: Fr) => { + if (t.equals(tag1.value)) { + return [makeLog(txHash1, tag1.value)]; + } else if (t.equals(tag3.value)) { + return [makeLog(txHash2, tag3.value)]; + } else if (t.equals(tag5.value)) { + return [makeLog(txHash2, tag5.value)]; + } else if (t.equals(tag8.value)) { + return [makeLog(txHash1, tag8.value)]; + } else if (t.equals(tag9.value)) { + return [makeLog(txHash3, tag9.value)]; + } + return []; + }), + ); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, 0, 10, aztecNode, taggingDataProvider); + + // Verify txHash1 has highest index 8 (should not be at index 1) + const 
txHashesAtIndex1 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, 1, 2); + expect(txHashesAtIndex1).toHaveLength(0); + const txHashesAtIndex8 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, 8, 9); + expect(txHashesAtIndex8).toHaveLength(1); + expect(txHashesAtIndex8[0].equals(txHash1)).toBe(true); + + // Verify txHash2 has highest index 5 (should not be at index 3) + const txHashesAtIndex3 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, 3, 4); + expect(txHashesAtIndex3).toHaveLength(0); + const txHashesAtIndex5 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, 5, 6); + expect(txHashesAtIndex5).toHaveLength(1); + expect(txHashesAtIndex5[0].equals(txHash2)).toBe(true); + + // Verify txHash3 has index 9 + const txHashesAtIndex9 = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, 9, 10); + expect(txHashesAtIndex9).toHaveLength(1); + expect(txHashesAtIndex9[0].equals(txHash3)).toBe(true); + + // Verify the last used index is the highest + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(9); + }); + + it('start is inclusive and end is exclusive', async () => { + const start = 5; + const end = 10; + + const txHashAtStart = TxHash.random(); + const txHashAtEnd = TxHash.random(); + + const tagAtStart = await computeSiloedTagForIndex(start); + const tagAtEnd = await computeSiloedTagForIndex(end); + + aztecNode.getLogsByTags.mockImplementation((tags: Fr[]) => { + return Promise.resolve( + tags.map((t: Fr) => { + if (t.equals(tagAtStart.value)) { + return [makeLog(txHashAtStart, tagAtStart.value)]; + } else if (t.equals(tagAtEnd.value)) { + return [makeLog(txHashAtEnd, tagAtEnd.value)]; + } + return []; + }), + ); + }); + + await loadAndStoreNewTaggingIndexes(secret, app, start, end, aztecNode, taggingDataProvider); + + // Verify that the log at start (inclusive) was processed + const txHashesAtStart = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, start, start + 1); + 
expect(txHashesAtStart).toHaveLength(1); + expect(txHashesAtStart[0].equals(txHashAtStart)).toBe(true); + + // Verify that the log at end (exclusive) was NOT processed + const txHashesAtEnd = await taggingDataProvider.getTxHashesOfPendingIndexes(secret, end, end + 1); + expect(txHashesAtEnd).toHaveLength(0); + + // Verify the last used index is the start index (since end was not processed) + expect(await taggingDataProvider.getLastUsedIndex(secret)).toBe(start); + }); +}); diff --git a/yarn-project/pxe/src/tagging/sync/utils/load_and_store_new_tagging_indexes.ts b/yarn-project/pxe/src/tagging/sync/utils/load_and_store_new_tagging_indexes.ts new file mode 100644 index 000000000000..a3229a44c99d --- /dev/null +++ b/yarn-project/pxe/src/tagging/sync/utils/load_and_store_new_tagging_indexes.ts @@ -0,0 +1,74 @@ +import type { AztecAddress } from '@aztec/stdlib/aztec-address'; +import type { AztecNode } from '@aztec/stdlib/interfaces/server'; +import type { DirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; +import { TxHash } from '@aztec/stdlib/tx'; + +import type { SenderTaggingDataProvider } from '../../../storage/tagging_data_provider/sender_tagging_data_provider.js'; +import { SiloedTag } from '../../siloed_tag.js'; +import { Tag } from '../../tag.js'; + +/** + * Loads tagging indexes from the Aztec node and stores them in the tagging data provider. + * @remarks This function is one of two places by which a pending index can get to the tagging data provider. The other + * place is when a tx is being sent from this PXE. + * @param secret - The directional app tagging secret that's unique for (sender, recipient, contract) tuple. + * @param app - The address of the contract that the logs are tagged for. Used for siloing tags to match + * kernel circuit behavior. + * @param start - The starting index (inclusive) of the window to process. + * @param end - The ending index (exclusive) of the window to process. 
+ * @param aztecNode - The Aztec node instance to query for logs. + * @param taggingDataProvider - The data provider to store pending indexes. + */ +export async function loadAndStoreNewTaggingIndexes( + secret: DirectionalAppTaggingSecret, + app: AztecAddress, + start: number, + end: number, + aztecNode: AztecNode, + taggingDataProvider: SenderTaggingDataProvider, +) { + // We compute the tags for the current window of indexes + const preTagsForWindow: PreTag[] = Array(end - start) + .fill(0) + .map((_, i) => ({ secret, index: start + i })); + const siloedTagsForWindow = await Promise.all( + preTagsForWindow.map(async preTag => SiloedTag.compute(await Tag.compute(preTag), app)), + ); + + const txsForTags = await getTxsContainingTags(siloedTagsForWindow, aztecNode); + const highestIndexMap = getTxHighestIndexMap(txsForTags, preTagsForWindow); + + // Now we iterate over the map, reconstruct the preTags and tx hash and store them in the db. + for (const [txHashStr, highestIndex] of highestIndexMap.entries()) { + const txHash = TxHash.fromString(txHashStr); + await taggingDataProvider.storePendingIndexes([{ secret, index: highestIndex }], txHash); + } +} + +// Returns txs that used the given tags. A tag might have been used in multiple txs and for this reason we return +// an array for each tag. +async function getTxsContainingTags(tags: SiloedTag[], aztecNode: AztecNode): Promise { + const tagsAsFr = tags.map(tag => tag.value); + const allLogs = await aztecNode.getLogsByTags(tagsAsFr); + return allLogs.map(logs => logs.filter(log => !log.isFromPublic).map(log => log.txHash)); +} + +// Returns a map of txHash to the highest index for that txHash. +function getTxHighestIndexMap(txHashesForTags: TxHash[][], preTagsForWindow: PreTag[]): Map { + if (txHashesForTags.length !== preTagsForWindow.length) { + throw new Error( + `Number of tx hashes arrays does not match number of pre-tags. 
${txHashesForTags.length} !== ${preTagsForWindow.length}`, + ); + } + + const highestIndexMap = new Map(); + for (let i = 0; i < txHashesForTags.length; i++) { + const taggingIndex = preTagsForWindow[i].index; + const txHashesForTag = txHashesForTags[i]; + for (const txHash of txHashesForTag) { + const key = txHash.toString(); + highestIndexMap.set(key, Math.max(highestIndexMap.get(key) ?? 0, taggingIndex)); + } + } + return highestIndexMap; +} diff --git a/yarn-project/pxe/src/tagging/utils.ts b/yarn-project/pxe/src/tagging/utils.ts index bcc5d340289e..1b3b7f3eb22f 100644 --- a/yarn-project/pxe/src/tagging/utils.ts +++ b/yarn-project/pxe/src/tagging/utils.ts @@ -1,6 +1,7 @@ import type { DirectionalAppTaggingSecret, PreTag } from '@aztec/stdlib/logs'; -// TODO(benesjan): Make this return tags instead - this will moves some complexity from syncTaggedLogs +// TODO(#17775): If this does not get dropped when implementing the linked issue make this return tags instead. This +// will move some complexity from syncTaggedLogs to here. export function getPreTagsForTheWindow( secretsAndWindows: { secret: DirectionalAppTaggingSecret; leftMostIndex: number; rightMostIndex: number }[], ): PreTag[] { @@ -15,7 +16,7 @@ export function getPreTagsForTheWindow( /** * Creates a map from directional app tagging secret to initial index. - * @param preTags - The pre tags to get the initial indexes map from. + * @param preTags - The pre-tags to get the initial indexes map from. * @returns The map from directional app tagging secret to initial index. 
*/ export function getInitialIndexesMap(preTags: { secret: DirectionalAppTaggingSecret; index: number | undefined }[]): { diff --git a/yarn-project/stdlib/src/tx/private_execution_result.ts b/yarn-project/stdlib/src/tx/private_execution_result.ts index 5adf939acc98..cc859bcf4758 100644 --- a/yarn-project/stdlib/src/tx/private_execution_result.ts +++ b/yarn-project/stdlib/src/tx/private_execution_result.ts @@ -139,7 +139,7 @@ export class PrivateCallExecutionResult { public returnValues: Fr[], /** The offchain effects emitted during execution of this function call via the `emit_offchain_effect` oracle. */ public offchainEffects: { data: Fr[] }[], - /** The pre tags used in this tx to compute tags for private logs */ + /** The pre-tags used in this tx to compute tags for private logs */ public preTags: PreTag[], /** The nested executions. */ public nestedExecutionResults: PrivateCallExecutionResult[], diff --git a/yarn-project/txe/src/state_machine/archiver.ts b/yarn-project/txe/src/state_machine/archiver.ts index 05a1da4153d8..7a465a345050 100644 --- a/yarn-project/txe/src/state_machine/archiver.ts +++ b/yarn-project/txe/src/state_machine/archiver.ts @@ -9,12 +9,13 @@ import type { AztecAddress } from '@aztec/stdlib/aztec-address'; import { CommitteeAttestation, L2Block, + type L2BlockId, type L2BlockSource, type L2Tips, PublishedL2Block, type ValidateBlockResult, } from '@aztec/stdlib/block'; -import { Checkpoint, type PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; +import { Checkpoint, PublishedCheckpoint } from '@aztec/stdlib/checkpoint'; import type { ContractInstanceWithAddress } from '@aztec/stdlib/contract'; import type { L1RollupConstants } from '@aztec/stdlib/epoch-helpers'; import type { BlockHeader } from '@aztec/stdlib/tx'; @@ -187,8 +188,21 @@ export class TXEArchiver extends ArchiverStoreHelper implements L2BlockSource { throw new Error('TXE Archiver does not implement "isEpochComplete"'); } - public getL2Tips(): Promise { - throw new 
Error('TXE Archiver does not implement "getL2Tips"'); + public async getL2Tips(): Promise { + // In TXE there is no possibility of reorgs and no blocks are ever getting proven so we just set 'latest', 'proven' + // and 'finalized' to the latest block. + const blockHeader = await this.getBlockHeader('latest'); + if (!blockHeader) { + throw new Error('L2Tips requested from TXE Archiver but no block header found'); + } + + const number = blockHeader.globalVariables.blockNumber; + const hash = (await blockHeader.hash()).toString(); + return { + latest: { number, hash } as L2BlockId, + proven: { number, hash } as L2BlockId, + finalized: { number, hash } as L2BlockId, + }; } public getL1Constants(): Promise { diff --git a/yarn-project/txe/src/state_machine/dummy_p2p_client.ts b/yarn-project/txe/src/state_machine/dummy_p2p_client.ts index 33bc616cf593..ff4c72e73c23 100644 --- a/yarn-project/txe/src/state_machine/dummy_p2p_client.ts +++ b/yarn-project/txe/src/state_machine/dummy_p2p_client.ts @@ -79,7 +79,9 @@ export class DummyP2P implements P2P { } public getTxStatus(_txHash: TxHash): Promise<'pending' | 'mined' | undefined> { - throw new Error('DummyP2P does not implement "getTxStatus"'); + // In TXE there is no concept of transactions but we need to implement this because of tagging. We return 'mined' + // tx status for any tx hash. 
+ return Promise.resolve('mined'); } public iteratePendingTxs(): AsyncIterableIterator { diff --git a/yarn-project/txe/src/txe_session.ts b/yarn-project/txe/src/txe_session.ts index 1e84ade1a9ac..386dc076d3f6 100644 --- a/yarn-project/txe/src/txe_session.ts +++ b/yarn-project/txe/src/txe_session.ts @@ -10,7 +10,8 @@ import { NoteDataProvider, PXEOracleInterface, PrivateEventDataProvider, - TaggingDataProvider, + RecipientTaggingDataProvider, + SenderTaggingDataProvider, } from '@aztec/pxe/server'; import { ExecutionNoteCache, @@ -136,7 +137,8 @@ export class TXESession implements TXESessionStateHandler { const privateEventDataProvider = new PrivateEventDataProvider(store); const contractDataProvider = new TXEContractDataProvider(store); const noteDataProvider = await NoteDataProvider.create(store); - const taggingDataProvider = new TaggingDataProvider(store); + const senderTaggingDataProvider = new SenderTaggingDataProvider(store); + const recipientTaggingDataProvider = new RecipientTaggingDataProvider(store); const capsuleDataProvider = new CapsuleDataProvider(store); const keyStore = new KeyStore(store); const accountDataProvider = new TXEAccountDataProvider(store); @@ -160,7 +162,8 @@ export class TXESession implements TXESessionStateHandler { noteDataProvider, capsuleDataProvider, stateMachine.anchorBlockDataProvider, - taggingDataProvider, + senderTaggingDataProvider, + recipientTaggingDataProvider, addressDataProvider, privateEventDataProvider, );