
Commit

Merge branch 'master' into smaller-deps
akorchyn committed Jun 18, 2024
2 parents 64a0a46 + 3b3e0f3 commit 6de1313
Showing 37 changed files with 456 additions and 571 deletions.
23 changes: 21 additions & 2 deletions .github/workflows/near_crates_publish.yml
@@ -1,6 +1,8 @@
name: Near Crates Publish

on:
release:
types: [released]
workflow_dispatch:
inputs:
branch:
@@ -19,12 +21,31 @@ jobs:

steps:
- name: Checkout near/nearcore's ${{ github.event.inputs.branch }} branch
if: ${{ github.event_name == 'workflow_dispatch'}}
uses: actions/checkout@v4
with:
fetch-depth: 0
ref: ${{ github.event.inputs.branch }}

- name: Checkout nearcore repository
if: ${{ github.event_name != 'workflow_dispatch'}}
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Set up git user
uses: fregante/setup-git-user@v2

- name: Check if version is already published
run: |
PACKAGE_NAME="near-primitives"
VERSION=$(cargo metadata --no-deps --format-version 1 | jq -r '.metadata.workspaces.version')
PUBLISHED=$(curl -s https://crates.io/api/v1/crates/$PACKAGE_NAME/versions | jq -r '.versions[] | select(.num=="'"$VERSION"'") | .num')
if [ "$PUBLISHED" == "$VERSION" ]; then
echo "Version $VERSION of $PACKAGE_NAME is already published."
exit 1
fi
- name: Publish near-workspaces on crates.io
env:
CARGO_REGISTRY_TOKEN: ${{ secrets.CARGO_REGISTRY_TOKEN }}
@@ -40,5 +61,3 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git push --no-follow-tags https://github.com/near/nearcore.git tag 'crates-*'
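
Note on the "Check if version is already published" step above: it queries the crates.io versions endpoint and fails the job when the workspace version is already live, so a re-run cannot republish. The same guard, sketched in Rust for reference (a hypothetical standalone helper, not part of this workflow, assuming the `serde_json` crate and `reqwest` with the "blocking" and "json" features):

// Hypothetical sketch of the workflow's publish guard.
fn is_published(package: &str, version: &str) -> Result<bool, Box<dyn std::error::Error>> {
    let url = format!("https://crates.io/api/v1/crates/{package}/versions");
    let body: serde_json::Value = reqwest::blocking::Client::new()
        .get(&url)
        // crates.io rejects requests that carry no User-Agent header.
        .header(reqwest::header::USER_AGENT, "near-publish-check")
        .send()?
        .json()?;
    // The endpoint returns { "versions": [ { "num": "x.y.z", ... }, ... ] }.
    Ok(body["versions"]
        .as_array()
        .map_or(false, |versions| versions.iter().any(|v| v["num"] == version)))
}
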
2 changes: 2 additions & 0 deletions Cargo.toml
@@ -12,6 +12,8 @@ license = "MIT OR Apache-2.0"
# Most crates are not stable on purpose, as maintaining API compatibility is a
# significant developer time expense. Please think thoroughly before adding
# anything to the list of stable crates.
# Only bump 0.x.* to 0.(x+1).0 on a nearcore release, as nearcore does not guarantee
# semver compatibility: the API can change without a protocol upgrade.
version = "0.20.1"
exclude = ["neard"]

147 changes: 76 additions & 71 deletions core/store/src/migrations.rs
@@ -4,6 +4,7 @@ use borsh::{BorshDeserialize, BorshSerialize};
use near_primitives::epoch_manager::epoch_info::EpochSummary;
use near_primitives::epoch_manager::AGGREGATOR_KEY;
use near_primitives::hash::CryptoHash;
use near_primitives::serialize::dec_format;
use near_primitives::state::FlatStateValue;
use near_primitives::transaction::{ExecutionOutcomeWithIdAndProof, ExecutionOutcomeWithProof};
use near_primitives::types::{
@@ -239,6 +240,65 @@ pub fn migrate_37_to_38(store: &Store) -> anyhow::Result<()> {
Ok(())
}

/// `ValidatorKickoutReason` enum layout up to and including DB version 39.
#[derive(BorshSerialize, BorshDeserialize, serde::Deserialize)]
pub enum LegacyValidatorKickoutReasonV39 {
/// Slashed validators are kicked out.
Slashed,
/// Validator didn't produce enough blocks.
NotEnoughBlocks { produced: NumBlocks, expected: NumBlocks },
/// Validator didn't produce enough chunks.
NotEnoughChunks { produced: NumBlocks, expected: NumBlocks },
/// Validator unstaked themselves.
Unstaked,
/// Validator stake is now below threshold
NotEnoughStake {
#[serde(with = "dec_format", rename = "stake_u128")]
stake: Balance,
#[serde(with = "dec_format", rename = "threshold_u128")]
threshold: Balance,
},
/// Enough stake but is not chosen because of seat limits.
DidNotGetASeat,
/// Validator didn't produce enough chunk endorsements.
NotEnoughChunkEndorsements { produced: NumBlocks, expected: NumBlocks },
}
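
The `dec_format` attribute on the `NotEnoughStake` fields above serializes `Balance` (a `u128`) as a decimal string, because 128-bit integers overflow common JSON tooling. A generic serde equivalent of the same idea looks roughly like this (hypothetical `dec_string` module for illustration; the real implementation lives in `near_primitives::serialize::dec_format`):

mod dec_string {
    use serde::{Deserialize, Deserializer, Serializer};

    // Write the u128 as its decimal string form, e.g. 100 -> "100".
    pub fn serialize<S: Serializer>(value: &u128, serializer: S) -> Result<S::Ok, S::Error> {
        serializer.serialize_str(&value.to_string())
    }

    // Parse the decimal string back into a u128.
    pub fn deserialize<'de, D: Deserializer<'de>>(deserializer: D) -> Result<u128, D::Error> {
        let s = String::deserialize(deserializer)?;
        s.parse().map_err(serde::de::Error::custom)
    }
}

#[derive(serde::Serialize, serde::Deserialize)]
struct StakeInfo {
    #[serde(with = "dec_string")]
    stake: u128,
}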

/// `EpochSummary` struct at DB version 39.
#[derive(BorshSerialize, BorshDeserialize)]
struct LegacyEpochSummaryV39 {
pub prev_epoch_last_block_hash: CryptoHash,
/// Proposals from the epoch, only the latest one per account
pub all_proposals: Vec<ValidatorStake>,
/// Kickout set, includes slashed
pub validator_kickout: HashMap<AccountId, LegacyValidatorKickoutReasonV39>,
/// Only for validators who met the threshold and didn't get slashed
pub validator_block_chunk_stats: HashMap<AccountId, BlockChunkValidatorStats>,
/// Protocol version for next epoch.
pub next_next_epoch_version: ProtocolVersion,
}
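
Freezing these legacy types byte-for-byte matters because Borsh encodes enums positionally: a one-byte variant index followed by the variant's fields, so adding, removing, or reordering variants silently changes how old rows decode. A minimal standalone illustration of that property (generic example, not nearcore code):

use borsh::{BorshDeserialize, BorshSerialize};

#[derive(BorshSerialize, BorshDeserialize, Debug, PartialEq)]
enum Old {
    A,
    B(u32),
}

fn main() {
    // Variant index first, then the fields: Old::B(7) encodes as
    // [0x01, 0x07, 0x00, 0x00, 0x00] (the u32 is little-endian).
    let bytes = borsh::to_vec(&Old::B(7)).unwrap();
    assert_eq!(bytes, vec![1, 7, 0, 0, 0]);
    assert_eq!(Old::try_from_slice(&bytes).unwrap(), Old::B(7));
}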

/// `BlockChunkValidatorStats` struct layout up to and including DB version 38.
#[derive(BorshDeserialize)]
struct LegacyBlockChunkValidatorStatsV38 {
pub block_stats: ValidatorStats,
pub chunk_stats: ValidatorStats,
}

/// `EpochSummary` struct layout up to and including DB version 38.
#[derive(BorshDeserialize)]
struct LegacyEpochSummaryV38 {
pub prev_epoch_last_block_hash: CryptoHash,
/// Proposals from the epoch, only the latest one per account
pub all_proposals: Vec<ValidatorStake>,
/// Kickout set, includes slashed
pub validator_kickout: HashMap<AccountId, LegacyValidatorKickoutReasonV39>,
/// Only for validators who met the threshold and didn't get slashed
pub validator_block_chunk_stats: HashMap<AccountId, LegacyBlockChunkValidatorStatsV38>,
/// Protocol version for next epoch.
pub next_version: ProtocolVersion,
}

/// Migrates the database from version 38 to 39.
///
/// Rewrites Epoch summary to include endorsement stats.
@@ -262,25 +322,6 @@ pub fn migrate_38_to_39(store: &Store) -> anyhow::Result<()> {
type LegacyEpochInfoAggregator = EpochInfoAggregator<ValidatorStats>;
type NewEpochInfoAggregator = EpochInfoAggregator<ChunkStats>;

#[derive(BorshDeserialize)]
struct LegacyBlockChunkValidatorStats {
pub block_stats: ValidatorStats,
pub chunk_stats: ValidatorStats,
}

#[derive(BorshDeserialize)]
struct LegacyEpochSummary {
pub prev_epoch_last_block_hash: CryptoHash,
/// Proposals from the epoch, only the latest one per account
pub all_proposals: Vec<ValidatorStake>,
/// Kickout set, includes slashed
pub validator_kickout: HashMap<AccountId, ValidatorKickoutReason>,
/// Only for validators who met the threshold and didn't get slashed
pub validator_block_chunk_stats: HashMap<AccountId, LegacyBlockChunkValidatorStats>,
/// Protocol version for next epoch.
pub next_version: ProtocolVersion,
}

let mut update = store.store_update();

// Update EpochInfoAggregator
@@ -316,8 +357,8 @@ pub fn migrate_38_to_39(store: &Store) -> anyhow::Result<()> {
// Update EpochSummary
for result in store.iter(DBCol::EpochValidatorInfo) {
let (key, old_value) = result?;
let legacy_summary = LegacyEpochSummary::try_from_slice(&old_value)?;
let new_value = EpochSummary {
let legacy_summary = LegacyEpochSummaryV38::try_from_slice(&old_value)?;
let new_value = LegacyEpochSummaryV39 {
prev_epoch_last_block_hash: legacy_summary.prev_epoch_last_block_hash,
all_proposals: legacy_summary.all_proposals,
validator_kickout: legacy_summary.validator_kickout,
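
Both this migration and migrate_39_to_40 below share one shape: iterate DBCol::EpochValidatorInfo, decode each row with the frozen legacy layout, re-encode with the newer layout, and commit a single store update. A condensed sketch of that pattern, with hypothetical `Legacy`/`Current` type parameters standing in for the real epoch-summary types:

// Condensed sketch of the column-rewrite pattern used by these migrations.
fn rewrite_column<Legacy, Current>(store: &Store, col: DBCol) -> anyhow::Result<()>
where
    Legacy: borsh::BorshDeserialize,
    Current: borsh::BorshSerialize + From<Legacy>,
{
    let mut update = store.store_update();
    for result in store.iter(col) {
        let (key, old_value) = result?;
        // Decode with the old layout, convert, and re-encode in place.
        let legacy = Legacy::try_from_slice(&old_value)?;
        update.set(col, &key, &borsh::to_vec(&Current::from(legacy))?);
    }
    update.commit()?;
    Ok(())
}
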
@@ -357,63 +398,27 @@ pub fn migrate_39_to_40(store: &Store) -> anyhow::Result<()> {
return Ok(());
}

use near_primitives::serialize::dec_format;
#[derive(BorshDeserialize, serde::Deserialize)]
pub enum LegacyValidatorKickoutReason {
/// Slashed validators are kicked out.
Slashed,
/// Validator didn't produce enough blocks.
NotEnoughBlocks { produced: NumBlocks, expected: NumBlocks },
/// Validator didn't produce enough chunks.
NotEnoughChunks { produced: NumBlocks, expected: NumBlocks },
/// Validator unstaked themselves.
Unstaked,
/// Validator stake is now below threshold
NotEnoughStake {
#[serde(with = "dec_format", rename = "stake_u128")]
stake: Balance,
#[serde(with = "dec_format", rename = "threshold_u128")]
threshold: Balance,
},
/// Enough stake but is not chosen because of seat limits.
DidNotGetASeat,
/// Validator didn't produce enough chunk endorsements.
NotEnoughChunkEndorsements { produced: NumBlocks, expected: NumBlocks },
}

#[derive(BorshDeserialize)]
struct LegacyEpochSummary {
pub prev_epoch_last_block_hash: CryptoHash,
/// Proposals from the epoch, only the latest one per account
pub all_proposals: Vec<ValidatorStake>,
/// Kickout set, includes slashed
pub validator_kickout: HashMap<AccountId, LegacyValidatorKickoutReason>,
/// Only for validators who met the threshold and didn't get slashed
pub validator_block_chunk_stats: HashMap<AccountId, BlockChunkValidatorStats>,
/// Protocol version for next epoch.
pub next_version: ProtocolVersion,
}

impl From<LegacyValidatorKickoutReason> for ValidatorKickoutReason {
fn from(reason: LegacyValidatorKickoutReason) -> Self {
impl From<LegacyValidatorKickoutReasonV39> for ValidatorKickoutReason {
fn from(reason: LegacyValidatorKickoutReasonV39) -> Self {
match reason {
LegacyValidatorKickoutReason::Slashed => ValidatorKickoutReason::Slashed,
LegacyValidatorKickoutReason::NotEnoughBlocks { produced, expected } => {
LegacyValidatorKickoutReasonV39::Slashed => ValidatorKickoutReason::Slashed,
LegacyValidatorKickoutReasonV39::NotEnoughBlocks { produced, expected } => {
ValidatorKickoutReason::NotEnoughBlocks { produced, expected }
}
LegacyValidatorKickoutReason::NotEnoughChunks { produced, expected } => {
LegacyValidatorKickoutReasonV39::NotEnoughChunks { produced, expected } => {
ValidatorKickoutReason::NotEnoughChunks { produced, expected }
}
LegacyValidatorKickoutReason::Unstaked => ValidatorKickoutReason::Unstaked,
LegacyValidatorKickoutReason::NotEnoughStake { stake, threshold } => {
LegacyValidatorKickoutReasonV39::Unstaked => ValidatorKickoutReason::Unstaked,
LegacyValidatorKickoutReasonV39::NotEnoughStake { stake, threshold } => {
ValidatorKickoutReason::NotEnoughStake { stake, threshold }
}
LegacyValidatorKickoutReason::DidNotGetASeat => {
LegacyValidatorKickoutReasonV39::DidNotGetASeat => {
ValidatorKickoutReason::DidNotGetASeat
}
LegacyValidatorKickoutReason::NotEnoughChunkEndorsements { produced, expected } => {
ValidatorKickoutReason::NotEnoughChunkEndorsements { produced, expected }
}
LegacyValidatorKickoutReasonV39::NotEnoughChunkEndorsements {
produced,
expected,
} => ValidatorKickoutReason::NotEnoughChunkEndorsements { produced, expected },
}
}
}
@@ -422,7 +427,7 @@ pub fn migrate_39_to_40(store: &Store) -> anyhow::Result<()> {
// Update EpochSummary
for result in store.iter(DBCol::EpochValidatorInfo) {
let (key, old_value) = result?;
let legacy_summary = LegacyEpochSummary::try_from_slice(&old_value)?;
let legacy_summary = LegacyEpochSummaryV39::try_from_slice(&old_value)?;
let legacy_validator_kickout = legacy_summary.validator_kickout;
let validator_kickout: HashMap<AccountId, ValidatorKickoutReason> =
legacy_validator_kickout
@@ -434,7 +439,7 @@ pub fn migrate_39_to_40(store: &Store) -> anyhow::Result<()> {
all_proposals: legacy_summary.all_proposals,
validator_kickout,
validator_block_chunk_stats: legacy_summary.validator_block_chunk_stats,
next_next_epoch_version: legacy_summary.next_version,
next_next_epoch_version: legacy_summary.next_next_epoch_version,
};
update.set(DBCol::EpochValidatorInfo, &key, &borsh::to_vec(&new_value)?);
}
27 changes: 21 additions & 6 deletions core/store/src/trie/accounting_cache.rs
@@ -6,8 +6,22 @@ use near_primitives::errors::StorageError;
use near_primitives::hash::CryptoHash;
use near_primitives::shard_layout::ShardUId;
use std::collections::HashMap;
use std::rc::Rc;
use std::sync::Arc;

/// Switch that controls whether the `TrieAccountingCache` is enabled.
pub struct TrieAccountingCacheSwitch(Rc<std::cell::Cell<bool>>);

impl TrieAccountingCacheSwitch {
pub fn set(&self, enabled: bool) {
self.0.set(enabled);
}

pub fn enabled(&self) -> bool {
self.0.get()
}
}
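
The switch wraps an `Rc<Cell<bool>>`, so every handle cloned out of it aliases the same flag the cache consults; callers can toggle accounting without holding a mutable borrow of the cache itself. A standalone sketch of that aliasing:

use std::cell::Cell;
use std::rc::Rc;

fn main() {
    // Two handles over one flag, mirroring the cache and an external caller.
    let cache_side = Rc::new(Cell::new(false));
    let caller_side = Rc::clone(&cache_side);

    caller_side.set(true); // the caller flips the switch...
    assert!(cache_side.get()); // ...and the cache-side view observes it.
}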

/// Deterministic cache to store trie nodes that have been accessed so far
/// during the cache's lifetime. It is used for deterministic gas accounting
/// so that previously accessed trie nodes and values are charged at a
@@ -41,7 +55,7 @@ use std::sync::Arc;
pub struct TrieAccountingCache {
/// Whether the cache is enabled. By default it is not, but it can be
/// turned on or off on the fly.
enable: bool,
enable: TrieAccountingCacheSwitch,
/// Cache of trie node hash -> trie node body, or a leaf value hash ->
/// leaf value.
cache: HashMap<CryptoHash, Arc<[u8]>>,
@@ -78,11 +92,12 @@ impl TrieAccountingCache {
accounting_cache_size: metrics::CHUNK_CACHE_SIZE.with_label_values(&metrics_labels),
}
});
Self { enable: false, cache: HashMap::new(), db_read_nodes: 0, mem_read_nodes: 0, metrics }
let switch = TrieAccountingCacheSwitch(Default::default());
Self { enable: switch, cache: HashMap::new(), db_read_nodes: 0, mem_read_nodes: 0, metrics }
}

pub fn set_enabled(&mut self, enabled: bool) {
self.enable = enabled;
pub fn enable_switch(&self) -> TrieAccountingCacheSwitch {
TrieAccountingCacheSwitch(Rc::clone(&self.enable.0))
}

/// Retrieve raw bytes from the cache if it exists, otherwise retrieve it
Expand All @@ -105,7 +120,7 @@ impl TrieAccountingCache {
}
let node = storage.retrieve_raw_bytes(hash)?;

if self.enable {
if self.enable.enabled() {
self.cache.insert(*hash, node.clone());
if let Some(metrics) = &self.metrics {
metrics.accounting_cache_size.set(self.cache.len() as i64);
@@ -123,7 +138,7 @@
} else {
self.db_read_nodes += 1;
}
if self.enable {
if self.enable.enabled() {
self.cache.insert(hash, data);
if let Some(metrics) = &self.metrics {
metrics.accounting_cache_size.set(self.cache.len() as i64);
3 changes: 2 additions & 1 deletion core/store/src/trie/mem/loading.rs
@@ -257,7 +257,8 @@ mod tests {
&CryptoHash::default(),
false,
));
trie_update.set_trie_cache_mode(near_primitives::types::TrieCacheMode::CachingChunk);
let _mode_guard = trie_update
.with_trie_cache_mode(Some(near_primitives::types::TrieCacheMode::CachingChunk));
let trie = trie_update.trie();
let root = in_memory_trie.get_root(&state_root).unwrap();
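
The test now calls `with_trie_cache_mode`, binding the result to `_mode_guard`; presumably the returned guard restores the previous cache mode when dropped, keeping the mode change scoped to the block. A simplified standalone sketch of that RAII shape (assumed semantics, not the actual nearcore implementation):

use std::cell::Cell;
use std::rc::Rc;

#[derive(Clone, Copy, PartialEq, Debug)]
enum Mode {
    CachingShard,
    CachingChunk,
}

struct ModeGuard {
    slot: Rc<Cell<Mode>>,
    prev: Mode,
}

impl Drop for ModeGuard {
    fn drop(&mut self) {
        // Restore whatever mode was active before the guard was taken.
        self.slot.set(self.prev);
    }
}

fn with_mode(slot: &Rc<Cell<Mode>>, mode: Mode) -> ModeGuard {
    let prev = slot.replace(mode);
    ModeGuard { slot: Rc::clone(slot), prev }
}

fn main() {
    let slot = Rc::new(Cell::new(Mode::CachingShard));
    {
        let _guard = with_mode(&slot, Mode::CachingChunk);
        assert_eq!(slot.get(), Mode::CachingChunk);
    }
    assert_eq!(slot.get(), Mode::CachingShard); // restored on drop
}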

10 changes: 5 additions & 5 deletions core/store/src/trie/trie_recording.rs
@@ -321,7 +321,7 @@ mod trie_recording_tests {
// Let's capture the baseline node counts - this is what will happen
// in production.
let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage);
trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache);
trie.accounting_cache.borrow().enable_switch().set(enable_accounting_cache);
for key in &keys_to_get {
assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned());
}
@@ -341,7 +341,7 @@
// we get are exactly the same.
let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage)
.recording_reads();
trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache);
trie.accounting_cache.borrow().enable_switch().set(enable_accounting_cache);
for key in &keys_to_get {
assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned());
}
@@ -366,7 +366,7 @@
destructively_delete_in_memory_state_from_disk(&store, &data_in_trie);
let trie = get_trie_for_shard(&tries, shard_uid, state_root, use_flat_storage)
.recording_reads();
trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache);
trie.accounting_cache.borrow().enable_switch().set(enable_accounting_cache);
for key in &keys_to_get {
assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned());
}
@@ -392,7 +392,7 @@
);
let trie =
Trie::from_recorded_storage(partial_storage.clone(), state_root, use_flat_storage);
trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache);
trie.accounting_cache.borrow().enable_switch().set(enable_accounting_cache);
for key in &keys_to_get {
assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned());
}
@@ -410,7 +410,7 @@
// Build a Trie using recorded storage and enable recording_reads on this Trie
let trie = Trie::from_recorded_storage(partial_storage, state_root, use_flat_storage)
.recording_reads();
trie.accounting_cache.borrow_mut().set_enabled(enable_accounting_cache);
trie.accounting_cache.borrow().enable_switch().set(enable_accounting_cache);
for key in &keys_to_get {
assert_eq!(trie.get(key).unwrap(), data_in_trie.get(key).cloned());
}