Skip to content

Commit

Permalink
feat(avm): keep history of reads and writes in journal (#4315)
Browse files Browse the repository at this point in the history
fixes: #3999
  • Loading branch information
Maddiaa0 committed Jan 31, 2024
1 parent 82f5f03 commit cdf1baf
Show file tree
Hide file tree
Showing 3 changed files with 137 additions and 30 deletions.
38 changes: 31 additions & 7 deletions yarn-project/acir-simulator/src/avm/journal/journal.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ describe('journal', () => {
journal.writeStorage(contractAddress, key, value);

const journalUpdates: JournalData = journal.flush();
expect(journalUpdates.storageWrites.get(contractAddress.toBigInt())?.get(key.toBigInt())).toEqual(value);
expect(journalUpdates.currentStorageValue.get(contractAddress.toBigInt())?.get(key.toBigInt())).toEqual(value);
});

it('When reading from storage, should check the parent first', async () => {
Expand Down Expand Up @@ -61,7 +61,7 @@ describe('journal', () => {
expect(cachedResult).toEqual(cachedValue);
});

it('When reading from storage, should check the cache first', async () => {
it('When reading from storage, should check the cache first, and be appended to read/write journal', async () => {
// Store a different value in storage vs the cache, and make sure the cache is returned
const contractAddress = new Fr(1);
const key = new Fr(2);
Expand All @@ -80,6 +80,16 @@ describe('journal', () => {
// Get the storage value
const cachedResult = await journal.readStorage(contractAddress, key);
expect(cachedResult).toEqual(cachedValue);

// We expect the journal to store the access in [storedVal, cachedVal] - [time0, time1]
const { storageReads, storageWrites }: JournalData = journal.flush();
const contractReads = storageReads.get(contractAddress.toBigInt());
const keyReads = contractReads?.get(key.toBigInt());
expect(keyReads).toEqual([storedValue, cachedValue]);

const contractWrites = storageWrites.get(contractAddress.toBigInt());
const keyWrites = contractWrites?.get(key.toBigInt());
expect(keyWrites).toEqual([cachedValue]);
});
});

Expand Down Expand Up @@ -127,17 +137,19 @@ describe('journal', () => {
const logsT1 = [new Fr(3), new Fr(4)];

journal.writeStorage(contractAddress, key, value);
await journal.readStorage(contractAddress, key);
journal.writeNoteHash(commitment);
journal.writeLog(logs);
journal.writeL1Message(logs);
journal.writeNullifier(commitment);

const journal1 = new AvmJournal(journal.hostStorage, journal);
journal.writeStorage(contractAddress, key, valueT1);
journal.writeNoteHash(commitmentT1);
journal.writeLog(logsT1);
journal.writeL1Message(logsT1);
journal.writeNullifier(commitmentT1);
journal1.writeStorage(contractAddress, key, valueT1);
await journal1.readStorage(contractAddress, key);
journal1.writeNoteHash(commitmentT1);
journal1.writeLog(logsT1);
journal1.writeL1Message(logsT1);
journal1.writeNullifier(commitmentT1);

journal1.mergeWithParent();

Expand All @@ -147,6 +159,18 @@ describe('journal', () => {

// Check that the UTXOs are merged
const journalUpdates: JournalData = journal.flush();

// Check storage reads order is preserved upon merge
// We first read value from t0, then value from t1
const contractReads = journalUpdates.storageReads.get(contractAddress.toBigInt());
const slotReads = contractReads?.get(key.toBigInt());
expect(slotReads).toEqual([value, valueT1]);

// Check storage writes order is preserved upon merge
// We first write value from t0, then value from t1
// BUG FIX: this previously queried journalUpdates.storageReads, so the
// writes journal merge was never actually asserted.
const contractWrites = journalUpdates.storageWrites.get(contractAddress.toBigInt());
const slotWrites = contractWrites?.get(key.toBigInt());
expect(slotWrites).toEqual([value, valueT1]);

expect(journalUpdates.newNoteHashes).toEqual([commitment, commitmentT1]);
expect(journalUpdates.newLogs).toEqual([logs, logsT1]);
expect(journalUpdates.newL1Messages).toEqual([logs, logsT1]);
Expand Down
123 changes: 103 additions & 20 deletions yarn-project/acir-simulator/src/avm/journal/journal.ts
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,14 @@ export type JournalData = {
newNullifiers: Fr[];
newL1Messages: Fr[][];
newLogs: Fr[][];

/** contract address -> key -> value */
storageWrites: Map<bigint, Map<bigint, Fr>>;
currentStorageValue: Map<bigint, Map<bigint, Fr>>;

/** contract address -> key -> value[] (stored in order of access) */
storageWrites: Map<bigint, Map<bigint, Fr[]>>;
/** contract address -> key -> value[] (stored in order of access) */
storageReads: Map<bigint, Map<bigint, Fr[]>>;
};

/**
Expand All @@ -28,9 +34,9 @@ export class AvmJournal {
public readonly hostStorage: HostStorage;

// Reading state - must be tracked for vm execution
// contract address -> key -> value
// TODO(https://github.com/AztecProtocol/aztec-packages/issues/3999)
private storageReads: Map<bigint, Map<bigint, Fr>> = new Map();
// contract address -> key -> value[] (array stored in order of reads)
private storageReads: Map<bigint, Map<bigint, Fr[]>> = new Map();
private storageWrites: Map<bigint, Map<bigint, Fr[]>> = new Map();

// New written state
private newNoteHashes: Fr[] = [];
Expand All @@ -41,7 +47,7 @@ export class AvmJournal {
private newLogs: Fr[][] = [];

// contract address -> key -> value
private storageWrites: Map<bigint, Map<bigint, Fr>> = new Map();
private currentStorageValue: Map<bigint, Map<bigint, Fr>> = new Map();

private parentJournal: AvmJournal | undefined;

Expand Down Expand Up @@ -74,12 +80,15 @@ export class AvmJournal {
* @param value -
*/
public writeStorage(contractAddress: Fr, key: Fr, value: Fr) {
let contractMap = this.storageWrites.get(contractAddress.toBigInt());
let contractMap = this.currentStorageValue.get(contractAddress.toBigInt());
if (!contractMap) {
contractMap = new Map();
this.storageWrites.set(contractAddress.toBigInt(), contractMap);
this.currentStorageValue.set(contractAddress.toBigInt(), contractMap);
}
contractMap.set(key.toBigInt(), value);

// We want to keep track of all performed writes in the journal
this.journalWrite(contractAddress, key, value);
}

/**
Expand All @@ -90,17 +99,52 @@ export class AvmJournal {
* @param key -
* @returns current value
*/
public readStorage(contractAddress: Fr, key: Fr): Promise<Fr> {
const cachedValue = this.storageWrites.get(contractAddress.toBigInt())?.get(key.toBigInt());
if (cachedValue) {
return Promise.resolve(cachedValue);
public async readStorage(contractAddress: Fr, key: Fr): Promise<Fr> {
// - We first try this journal's storage cache ( if written to before in this call frame )
// - Then we try the parent journal's storage cache ( if it exists ) ( written to earlier in this block )
// - Finally we try the host storage ( a trip to the database )

// Do not early return as we want to keep track of reads in this.storageReads
let value = this.currentStorageValue.get(contractAddress.toBigInt())?.get(key.toBigInt());
if (!value && this.parentJournal) {
value = await this.parentJournal?.readStorage(contractAddress, key);
}
if (this.parentJournal) {
return this.parentJournal?.readStorage(contractAddress, key);
if (!value) {
value = await this.hostStorage.publicStateDb.storageRead(contractAddress, key);
}
return this.hostStorage.publicStateDb.storageRead(contractAddress, key);

this.journalRead(contractAddress, key, value);
return Promise.resolve(value);
}

/**
 * Record a single storage access (read or write) in the given journal map.
 * Accesses are appended per (contract address, key) in the order they occur,
 * so the resulting arrays preserve access order; per the comment above,
 * this information is hinted to the AVM circuit.
 * @param map - The journal to append to (the reads journal or the writes journal)
 * @param contractAddress - Contract whose storage was accessed
 * @param key - Storage slot that was accessed
 * @param value - The value read from / written to the slot
 */
journalUpdate(map: Map<bigint, Map<bigint, Fr[]>>, contractAddress: Fr, key: Fr, value: Fr): void {
// Lazily create the per-contract map on first access for this contract
let contractMap = map.get(contractAddress.toBigInt());
if (!contractMap) {
contractMap = new Map<bigint, Array<Fr>>();
map.set(contractAddress.toBigInt(), contractMap);
}

// Lazily create the per-slot access array on first access to this key
let accessArray = contractMap.get(key.toBigInt());
if (!accessArray) {
accessArray = new Array<Fr>();
contractMap.set(key.toBigInt(), accessArray);
}
// Append in chronological order; order is significant for the hints
accessArray.push(value);
}

// Create an instance of journalUpdate that appends to the read array
private journalRead = this.journalUpdate.bind(this, this.storageReads);
// Create an instance of journalUpdate that appends to the writes array
private journalWrite = this.journalUpdate.bind(this, this.storageWrites);

/**
 * Append a new note hash to this journal's pending state, in insertion order.
 * @param noteHash - The note hash to record
 */
public writeNoteHash(noteHash: Fr) {
this.newNoteHashes.push(noteHash);
}
Expand Down Expand Up @@ -131,9 +175,14 @@ export class AvmJournal {
this.parentJournal.newNoteHashes = this.parentJournal.newNoteHashes.concat(this.newNoteHashes);
this.parentJournal.newL1Messages = this.parentJournal.newL1Messages.concat(this.newL1Messages);
this.parentJournal.newNullifiers = this.parentJournal.newNullifiers.concat(this.newNullifiers);
this.parentJournal.newLogs = this.parentJournal.newLogs.concat(this.newLogs);

// Merge Public State
mergeContractMaps(this.parentJournal.storageWrites, this.storageWrites);
mergeCurrentValueMaps(this.parentJournal.currentStorageValue, this.currentStorageValue);

// Merge storage read and write journals
mergeContractJournalMaps(this.parentJournal.storageReads, this.storageReads);
mergeContractJournalMaps(this.parentJournal.storageWrites, this.storageWrites);
}

/**
Expand All @@ -147,38 +196,72 @@ export class AvmJournal {
newNullifiers: this.newNullifiers,
newL1Messages: this.newL1Messages,
newLogs: this.newLogs,
currentStorageValue: this.currentStorageValue,
storageReads: this.storageReads,
storageWrites: this.storageWrites,
};
}
}

/**
* Merges two contract maps together
* Merges two contract current-value maps together
* Where childMap keys will take precedent over the hostMap
* The assumption being that the child map is created at a later time
* And thus contains more up to date information
*
* @param hostMap - The map to be merged into
* @param childMap - The map to be merged from
*/
function mergeContractMaps(hostMap: Map<bigint, Map<bigint, Fr>>, childMap: Map<bigint, Map<bigint, Fr>>) {
function mergeCurrentValueMaps(hostMap: Map<bigint, Map<bigint, Fr>>, childMap: Map<bigint, Map<bigint, Fr>>) {
for (const [key, value] of childMap) {
const map1Value = hostMap.get(key);
if (!map1Value) {
hostMap.set(key, value);
} else {
mergeStorageMaps(map1Value, value);
mergeStorageCurrentValueMaps(map1Value, value);
}
}
}

/**
*
* @param hostMap - The map to be merge into
* @param childMap - The map to be merged from
*/
function mergeStorageMaps(hostMap: Map<bigint, Fr>, childMap: Map<bigint, Fr>) {
function mergeStorageCurrentValueMaps(hostMap: Map<bigint, Fr>, childMap: Map<bigint, Fr>) {
for (const [key, value] of childMap) {
hostMap.set(key, value);
}
}

/**
 * Folds a child journalling map (contract address -> key -> access array) into a host map.
 * Contract entries absent from the host are adopted wholesale; entries present in both
 * maps are merged per-slot, preserving the order of accesses (child entries come after
 * the host's, as they occurred later).
 *
 * @param hostMap - The map that receives the merged entries
 * @param childMap - The map whose entries are folded in
 */
function mergeContractJournalMaps(hostMap: Map<bigint, Map<bigint, Fr[]>>, childMap: Map<bigint, Map<bigint, Fr[]>>) {
  for (const [contractAddress, childSlots] of childMap.entries()) {
    const hostSlots = hostMap.get(contractAddress);
    if (hostSlots === undefined) {
      // No host entry for this contract yet: adopt the child's slot map directly.
      hostMap.set(contractAddress, childSlots);
      continue;
    }
    // Both sides track this contract: merge slot-by-slot.
    mergeStorageJournalMaps(hostSlots, childSlots);
  }
}

/**
 * Merges a child's per-slot access journal into the host's.
 * Child accesses occurred after the host's, so for slots present in both maps
 * the child's values are appended (in order) to the host's existing array.
 *
 * BUG FIX: the previous implementation used `readArr?.concat(value)`, but
 * Array.prototype.concat returns a NEW array and does not mutate the receiver;
 * the result was discarded, silently dropping all child accesses for any slot
 * the host had already journalled. `push(...value)` appends in place.
 *
 * @param hostMap - The map to be merged into
 * @param childMap - The map to be merged from
 */
function mergeStorageJournalMaps(hostMap: Map<bigint, Fr[]>, childMap: Map<bigint, Fr[]>) {
  for (const [key, value] of childMap) {
    const hostArr = hostMap.get(key);
    if (!hostArr) {
      // Slot unknown to the host: adopt the child's access array as-is.
      hostMap.set(key, value);
    } else {
      // Slot tracked by both: append child accesses after the host's, in order.
      hostArr.push(...value);
    }
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,10 @@ describe('External Calls', () => {
expect(retValue).toEqual([new Field(1n), new Field(2n)]);

// Check that the storage call has been merged into the parent journal
const { storageWrites } = journal.flush();
expect(storageWrites.size).toEqual(1);
const { currentStorageValue } = journal.flush();
expect(currentStorageValue.size).toEqual(1);

const nestedContractWrites = storageWrites.get(addr.toBigInt());
const nestedContractWrites = currentStorageValue.get(addr.toBigInt());
expect(nestedContractWrites).toBeDefined();

const slotNumber = 1n;
Expand Down

0 comments on commit cdf1baf

Please sign in to comment.