implements copy-on-write for staked-nodes (backport #19090) (#22507)
* implements copy-on-write for staked-nodes (#19090)

Bank::staked_nodes and Bank::epoch_staked_nodes redundantly clone the
staked-nodes HashMap even though an immutable reference would suffice:
https://github.com/solana-labs/solana/blob/a9014cece/runtime/src/vote_account.rs#L77

This commit implements copy-on-write semantics for staked-nodes by
wrapping the underlying HashMap in Arc<...>.

(cherry picked from commit f302774)

# Conflicts:
#	runtime/src/bank.rs
#	runtime/src/stakes.rs
#	runtime/src/vote_account.rs

* removes backport merge conflicts

Co-authored-by: behzad nouri <behzadnouri@gmail.com>
mergify[bot] and behzadnouri committed Jan 14, 2022
1 parent debac00 commit 40ef11e
Showing 9 changed files with 51 additions and 39 deletions.
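
Before the diff, here is a minimal sketch of the copy-on-write pattern the commit message describes. It is not the runtime code: a String key stands in for solana_sdk::pubkey::Pubkey, and the struct and method names are illustrative only. Reads hand out a clone of the Arc (a reference-count bump, not a HashMap deep copy); writes go through Arc::make_mut, so snapshots already handed out never change.

```rust
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

// Illustrative stand-in for the staked-nodes cache; the real field lives in
// runtime/src/vote_account.rs and keys on solana_sdk::pubkey::Pubkey.
struct StakedNodes {
    // RwLock<Arc<..>> rather than Arc<RwLock<..>>: readers share an immutable
    // snapshot; writers replace (or copy-then-mutate) that snapshot.
    inner: RwLock<Arc<HashMap<String, u64>>>,
}

impl StakedNodes {
    // Read path: cloning the Arc is a refcount bump, not a HashMap deep copy.
    fn staked_nodes(&self) -> Arc<HashMap<String, u64>> {
        self.inner.read().unwrap().clone()
    }

    // Write path: Arc::make_mut clones the map only if another Arc to the
    // same allocation is still alive, so outstanding snapshots never change.
    fn add_stake(&self, node: &str, stake: u64) {
        let mut guard = self.inner.write().unwrap();
        let map = Arc::make_mut(&mut guard);
        *map.entry(node.to_string()).or_insert(0) += stake;
    }
}

fn main() {
    let nodes = StakedNodes {
        inner: RwLock::new(Arc::default()),
    };
    nodes.add_stake("node-a", 42);
    let snapshot = nodes.staked_nodes(); // cheap: no HashMap clone
    nodes.add_stake("node-a", 8); // copies the map; `snapshot` is unaffected
    assert_eq!(snapshot["node-a"], 42);
    assert_eq!(nodes.staked_nodes()["node-a"], 50);
}
```

Keeping the map behind RwLock<Arc<...>> rather than Arc<RwLock<...>> is what the comment in the vote_account.rs hunk below calls copy-on-write as opposed to sharing mutations: a reader's snapshot is immutable and never observes later writes.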
2 changes: 1 addition & 1 deletion Cargo.lock


5 changes: 3 additions & 2 deletions core/src/broadcast_stage/broadcast_duplicates_run.rs
@@ -216,8 +216,9 @@ impl BroadcastRun for BroadcastDuplicatesRun {
let mut stakes: Vec<(Pubkey, u64)> = bank
.epoch_staked_nodes(bank_epoch)
.unwrap()
- .into_iter()
- .filter(|(pubkey, _)| *pubkey != self.keypair.pubkey())
+ .iter()
+ .filter(|(pubkey, _)| **pubkey != self.keypair.pubkey())
+ .map(|(pubkey, stake)| (*pubkey, *stake))
.collect();
stakes.sort_by(|(l_key, l_stake), (r_key, r_stake)| {
if r_stake == l_stake {
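
A standalone illustration of the caller-side change in this hunk, with made-up data and a [u8; 32] array standing in for Pubkey: an Arc<HashMap<...>> cannot be consumed with into_iter(), so the caller iterates by reference and copies the Copy key/value pairs out.

```rust
use std::{collections::HashMap, sync::Arc};

fn main() {
    // Made-up stakes; [u8; 32] stands in for solana_sdk::pubkey::Pubkey.
    let staked_nodes: Arc<HashMap<[u8; 32], u64>> =
        Arc::new(HashMap::from([([1u8; 32], 10), ([2u8; 32], 30)]));
    let my_pubkey = [1u8; 32];

    // The Arc cannot be consumed, so iterate by reference and copy the
    // (Copy) key/value pairs out, as the hunk above now does.
    let mut stakes: Vec<([u8; 32], u64)> = staked_nodes
        .iter()
        .filter(|(pubkey, _)| **pubkey != my_pubkey)
        .map(|(pubkey, stake)| (*pubkey, *stake))
        .collect();
    stakes.sort_by(|(_, l_stake), (_, r_stake)| r_stake.cmp(l_stake));
    assert_eq!(stakes, vec![([2u8; 32], 30)]);
}
```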
4 changes: 2 additions & 2 deletions gossip/src/cluster_info.rs
@@ -1749,7 +1749,7 @@ impl ClusterInfo {
Some(root_bank.feature_set.clone()),
)
}
- None => (HashMap::new(), None),
+ None => (Arc::default(), None),
};
let require_stake_for_gossip =
self.require_stake_for_gossip(feature_set.as_deref(), &stakes);
@@ -2542,7 +2542,7 @@ impl ClusterInfo {
// feature does not roll back (if the feature happens to get enabled in
// a minority fork).
let (feature_set, stakes) = match bank_forks {
- None => (None, HashMap::default()),
+ None => (None, Arc::default()),
Some(bank_forks) => {
let bank = bank_forks.read().unwrap().root_bank();
let feature_set = bank.feature_set.clone();
11 changes: 9 additions & 2 deletions ledger/src/leader_schedule_utils.rs
@@ -13,7 +13,10 @@ pub fn leader_schedule(epoch: Epoch, bank: &Bank) -> Option<LeaderSchedule> {
bank.epoch_staked_nodes(epoch).map(|stakes| {
let mut seed = [0u8; 32];
seed[0..8].copy_from_slice(&epoch.to_le_bytes());
- let mut stakes: Vec<_> = stakes.into_iter().collect();
+ let mut stakes: Vec<_> = stakes
+     .iter()
+     .map(|(pubkey, stake)| (*pubkey, *stake))
+     .collect();
sort_stakes(&mut stakes);
LeaderSchedule::new(
&stakes,
@@ -91,7 +94,11 @@ mod tests {
.genesis_config;
let bank = Bank::new(&genesis_config);

- let pubkeys_and_stakes: Vec<_> = bank.staked_nodes().into_iter().collect();
+ let pubkeys_and_stakes: Vec<_> = bank
+     .staked_nodes()
+     .iter()
+     .map(|(pubkey, stake)| (*pubkey, *stake))
+     .collect();
let seed = [0u8; 32];
let leader_schedule = LeaderSchedule::new(
&pubkeys_and_stakes,
8 changes: 4 additions & 4 deletions programs/bpf/Cargo.lock


2 changes: 1 addition & 1 deletion runtime/Cargo.toml
@@ -21,7 +21,7 @@ crossbeam-channel = "0.4"
dir-diff = "0.3.2"
flate2 = "1.0.14"
fnv = "1.0.7"
- itertools = "0.9.0"
+ itertools = "0.10.1"
lazy_static = "1.4.0"
libc = "0.2.81"
libloading = "0.6.2"
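
The itertools bump from 0.9.0 to 0.10.1 appears to be what provides Itertools::into_grouping_map, which the vote_account.rs hunk further down uses to sum stake per node. A small standalone sketch of that API, with string keys and made-up stakes rather than the real types:

```rust
use itertools::Itertools;
use std::collections::HashMap;

fn main() {
    // Made-up (node, stake) pairs; the real code maps each vote account to
    // its node_pubkey and sums stake per node.
    let vote_account_stakes = [("node-a", 10u64), ("node-b", 5), ("node-a", 7), ("node-c", 0)];

    let staked_nodes: HashMap<&str, u64> = vote_account_stakes
        .into_iter()
        .filter(|(_, stake)| *stake != 0)
        // into_grouping_map groups by the first tuple element (the node) ...
        .into_grouping_map()
        // ... and aggregate folds each group's stakes into a running sum.
        .aggregate(|acc, _node, stake| Some(acc.unwrap_or_default() + stake));

    assert_eq!(staked_nodes["node-a"], 17);
    assert_eq!(staked_nodes.get("node-c"), None);
}
```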
4 changes: 2 additions & 2 deletions runtime/src/bank.rs
@@ -5282,7 +5282,7 @@ impl Bank {
self.stakes_cache.stakes().stake_delegations().clone()
}

- pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
+ pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.stakes_cache.stakes().staked_nodes()
}

@@ -5312,7 +5312,7 @@
&self.epoch_stakes
}

- pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<HashMap<Pubkey, u64>> {
+ pub fn epoch_staked_nodes(&self, epoch: Epoch) -> Option<Arc<HashMap<Pubkey, u64>>> {
Some(self.epoch_stakes.get(&epoch)?.stakes().staked_nodes())
}

2 changes: 1 addition & 1 deletion runtime/src/stakes.rs
@@ -335,7 +335,7 @@ impl Stakes {
&self.stake_delegations
}

- pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
+ pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.vote_accounts.staked_nodes()
}

52 changes: 28 additions & 24 deletions runtime/src/vote_account.rs
@@ -1,4 +1,5 @@
use {
+ itertools::Itertools,
serde::{
de::{Deserialize, Deserializer},
ser::{Serialize, Serializer},
@@ -40,9 +41,11 @@ pub struct VoteAccounts {
// Inner Arc is meant to implement copy-on-write semantics as opposed to
// sharing mutations (hence RwLock<Arc<...>> instead of Arc<RwLock<...>>).
staked_nodes: RwLock<
- HashMap<
-     Pubkey, // VoteAccount.vote_state.node_pubkey.
-     u64, // Total stake across all vote-accounts.
+ Arc<
+     HashMap<
+         Pubkey, // VoteAccount.vote_state.node_pubkey.
+         u64, // Total stake across all vote-accounts.
+     >,
>,
>,
staked_nodes_once: Once,
@@ -69,20 +72,19 @@ impl VoteAccount {
}

impl VoteAccounts {
- pub fn staked_nodes(&self) -> HashMap<Pubkey, u64> {
+ pub fn staked_nodes(&self) -> Arc<HashMap<Pubkey, u64>> {
self.staked_nodes_once.call_once(|| {
- let mut staked_nodes = HashMap::new();
- for (stake, vote_account) in
-     self.vote_accounts.values().filter(|(stake, _)| *stake != 0)
- {
-     if let Some(node_pubkey) = vote_account.node_pubkey() {
-         staked_nodes
-             .entry(node_pubkey)
-             .and_modify(|s| *s += *stake)
-             .or_insert(*stake);
-     }
- }
- *self.staked_nodes.write().unwrap() = staked_nodes
+ let staked_nodes = self
+     .vote_accounts
+     .values()
+     .filter(|(stake, _)| *stake != 0)
+     .filter_map(|(stake, vote_account)| {
+         let node_pubkey = vote_account.node_pubkey()?;
+         Some((node_pubkey, stake))
+     })
+     .into_grouping_map()
+     .aggregate(|acc, _node_pubkey, stake| Some(acc.unwrap_or_default() + stake));
+ *self.staked_nodes.write().unwrap() = Arc::new(staked_nodes)
});
self.staked_nodes.read().unwrap().clone()
}
@@ -135,9 +137,9 @@ impl VoteAccounts {
fn add_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
if stake != 0 && self.staked_nodes_once.is_completed() {
if let Some(node_pubkey) = vote_account.node_pubkey() {
- self.staked_nodes
-     .write()
-     .unwrap()
+ let mut staked_nodes = self.staked_nodes.write().unwrap();
+ let staked_nodes = Arc::make_mut(&mut staked_nodes);
+ staked_nodes
.entry(node_pubkey)
.and_modify(|s| *s += stake)
.or_insert(stake);
@@ -148,7 +150,9 @@
fn sub_node_stake(&mut self, stake: u64, vote_account: &VoteAccount) {
if stake != 0 && self.staked_nodes_once.is_completed() {
if let Some(node_pubkey) = vote_account.node_pubkey() {
- match self.staked_nodes.write().unwrap().entry(node_pubkey) {
+ let mut staked_nodes = self.staked_nodes.write().unwrap();
+ let staked_nodes = Arc::make_mut(&mut staked_nodes);
+ match staked_nodes.entry(node_pubkey) {
Entry::Vacant(_) => panic!("this should not happen!"),
Entry::Occupied(mut entry) => match entry.get().cmp(&stake) {
Ordering::Less => panic!("subtraction value exceeds node's stake"),
@@ -485,7 +489,7 @@ mod tests {
if (k + 1) % 128 == 0 {
assert_eq!(
staked_nodes(&accounts[..k + 1]),
- vote_accounts.staked_nodes()
+ *vote_accounts.staked_nodes()
);
}
}
@@ -495,7 +499,7 @@
let (pubkey, (_, _)) = accounts.swap_remove(index);
vote_accounts.remove(&pubkey);
if (k + 1) % 32 == 0 {
- assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
+ assert_eq!(staked_nodes(&accounts), *vote_accounts.staked_nodes());
}
}
// Modify the stakes for some of the accounts.
@@ -510,7 +514,7 @@
}
*stake = new_stake;
if (k + 1) % 128 == 0 {
- assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
+ assert_eq!(staked_nodes(&accounts), *vote_accounts.staked_nodes());
}
}
// Remove everything.
@@ -519,7 +523,7 @@
let (pubkey, (_, _)) = accounts.swap_remove(index);
vote_accounts.remove(&pubkey);
if accounts.len() % 32 == 0 {
- assert_eq!(staked_nodes(&accounts), vote_accounts.staked_nodes());
+ assert_eq!(staked_nodes(&accounts), *vote_accounts.staked_nodes());
}
}
assert!(vote_accounts.staked_nodes.read().unwrap().is_empty());
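
For the add_node_stake / sub_node_stake changes above, the useful property of Arc::make_mut is that it clones the inner HashMap only when another Arc (for example, a snapshot returned by staked_nodes()) is still alive; otherwise it mutates in place. A standalone demonstration with string keys and made-up stakes, not the runtime types:

```rust
use std::{collections::HashMap, sync::Arc};

fn main() {
    let mut owner: Arc<HashMap<&str, u64>> = Arc::new(HashMap::from([("node-a", 10)]));

    // No other handle exists: make_mut mutates in place, no clone happens.
    let before = Arc::as_ptr(&owner);
    Arc::make_mut(&mut owner).insert("node-b", 5);
    assert_eq!(before, Arc::as_ptr(&owner));

    // A reader holds a snapshot: make_mut clones the map first, so the
    // snapshot keeps the old contents while the owner sees the new ones.
    let snapshot = Arc::clone(&owner);
    *Arc::make_mut(&mut owner).get_mut("node-a").unwrap() += 1;
    assert!(!Arc::ptr_eq(&owner, &snapshot));
    assert_eq!(snapshot["node-a"], 10);
    assert_eq!(owner["node-a"], 11);
}
```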
