[fix] #0000: On-chain predictable iteration order #4130

Merged
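The change in a nutshell: std's `HashMap` and `HashSet` seed their hasher randomly per process, so two peers holding identical state can iterate the "same" map in different orders, which is poison for anything consensus-facing. `indexmap`'s `IndexMap`/`IndexSet` iterate in insertion order, reproducible on every peer. A minimal sketch of the difference, assuming only the `indexmap` crate (toy data, not Iroha's types):

```rust
use std::collections::HashMap;

use indexmap::IndexMap;

fn main() {
    let entries = [("alice", 1), ("bob", 2), ("carol", 3)];

    // HashMap: iteration order depends on a per-process random hasher seed,
    // so this printout can change from run to run (and peer to peer).
    let hashed: HashMap<&str, i32> = entries.into_iter().collect();
    println!("{:?}", hashed.keys().collect::<Vec<_>>());

    // IndexMap: always insertion order, identical everywhere.
    let indexed: IndexMap<&str, i32> = entries.into_iter().collect();
    assert_eq!(indexed.keys().copied().collect::<Vec<_>>(), ["alice", "bob", "carol"]);
}
```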
27 changes: 14 additions & 13 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions core/Cargo.toml
@@ -68,7 +68,9 @@ displaydoc = { workspace = true }
wasmtime = { workspace = true }
parking_lot = { workspace = true, features = ["deadlock_detection"] }
derive_more = { workspace = true }

+uuid = { version = "1.4.1", features = ["v4"] }
+indexmap = "2.1.0"

[dev-dependencies]
criterion = { workspace = true }
1 change: 1 addition & 0 deletions core/clippy.toml
@@ -0,0 +1 @@
+disallowed-types = ["std::collections::HashMap", "std::collections::HashSet"]
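This one-line `clippy.toml` is what keeps the migration honest: `clippy::disallowed_types` now fires on any use of the listed types within the `core` crate, and the few places that legitimately need std's types opt out with a local `#[allow(clippy::disallowed_types)]` (see `block_sync.rs` just below). As a possible refinement, not part of this diff, clippy's config also accepts a table form that attaches a reason to the diagnostic:

```toml
# Hypothetical extension of core/clippy.toml; the PR uses the plain string form.
disallowed-types = [
    { path = "std::collections::HashMap", reason = "nondeterministic iteration order; use indexmap::IndexMap" },
    { path = "std::collections::HashSet", reason = "nondeterministic iteration order; use indexmap::IndexSet" },
]
```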
1 change: 1 addition & 0 deletions core/src/block_sync.rs
@@ -84,6 +84,7 @@ impl BlockSynchronizer {
}

/// Get a random online peer.
+#[allow(clippy::disallowed_types)]
pub fn random_peer(peers: &std::collections::HashSet<PeerId>) -> Option<Peer> {
use rand::{seq::IteratorRandom, SeedableRng};

11 changes: 6 additions & 5 deletions core/src/lib.rs
@@ -16,9 +16,10 @@ pub mod tx;
pub mod wsv;

use core::time::Duration;
-use std::collections::{BTreeSet, HashMap, HashSet};
+use std::collections::BTreeSet;

use gossiper::TransactionGossip;
+use indexmap::{IndexMap, IndexSet};
use iroha_data_model::{permission::Permissions, prelude::*};
use iroha_primitives::unique_vec::UniqueVec;
use parity_scale_codec::{Decode, Encode};
@@ -39,16 +40,16 @@ pub type IrohaNetwork = iroha_p2p::NetworkHandle<NetworkMessage>;
pub type PeersIds = UniqueVec<PeerId>;

/// Parameters set.
-pub type Parameters = HashSet<Parameter>;
+pub type Parameters = IndexSet<Parameter>;

/// API to work with collections of [`DomainId`]: [`Domain`] mappings.
-pub type DomainsMap = HashMap<DomainId, Domain>;
+pub type DomainsMap = IndexMap<DomainId, Domain>;

/// API to work with collections of [`RoleId`]: [`Role`] mappings.
-pub type RolesMap = HashMap<RoleId, Role>;
+pub type RolesMap = IndexMap<RoleId, Role>;

/// API to work with collections of [`AccountId`]: [`Permissions`] mappings.
-pub type PermissionTokensMap = HashMap<AccountId, Permissions>;
+pub type PermissionTokensMap = IndexMap<AccountId, Permissions>;

/// API to work with collections of [`AccountId`] to [`RoleId`] mappings.
pub type AccountRolesSet = BTreeSet<role::RoleIdWithOwner>;
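These aliases are the heart of the PR: every map backing on-chain state (`DomainsMap`, `RolesMap`, `PermissionTokensMap`) now promises a deterministic walk. A hypothetical sketch of why that matters downstream; `DomainId` and `Domain` are stand-ins here, not the real `iroha_data_model` types:

```rust
use indexmap::IndexMap;

// Stand-ins for illustration; the real types live in iroha_data_model.
type DomainId = String;
type Domain = u64;
type DomainsMap = IndexMap<DomainId, Domain>;

/// Anything derived from a full iteration (snapshots, hashes, query pages)
/// now comes out in the same order on every peer that applied the same
/// instructions; with a HashMap, two honest peers could disagree here.
fn snapshot(domains: &DomainsMap) -> Vec<(&DomainId, &Domain)> {
    domains.iter().collect()
}
```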
6 changes: 3 additions & 3 deletions core/src/query/store.rs
@@ -2,11 +2,11 @@

use std::{
cmp::Ordering,
-collections::HashMap,
num::NonZeroU64,
time::{Duration, Instant},
};

+use indexmap::IndexMap;
use iroha_config::live_query_store::Configuration;
use iroha_data_model::{
asset::AssetValue,
@@ -67,15 +67,15 @@ type LiveQuery = Batched<Vec<Value>>;
/// Clients can handle their queries using [`LiveQueryStoreHandle`]
#[derive(Debug)]
pub struct LiveQueryStore {
-queries: HashMap<QueryId, (LiveQuery, Instant)>,
+queries: IndexMap<QueryId, (LiveQuery, Instant)>,
query_idle_time: Duration,
}

impl LiveQueryStore {
/// Construct [`LiveQueryStore`] from configuration.
pub fn from_configuration(cfg: Configuration) -> Self {
Self {
-queries: HashMap::default(),
+queries: IndexMap::new(),
query_idle_time: Duration::from_millis(cfg.query_idle_time_ms.into()),
}
}
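One subtlety the switch introduces, though it is not visible in this hunk: unlike `HashMap::remove`, `IndexMap` offers two removal flavors, and only one preserves the order the rest of the code may now rely on. A small sketch, assuming `indexmap` 2.x:

```rust
use indexmap::IndexMap;

fn main() {
    let m: IndexMap<&str, u32> = [("a", 1), ("b", 2), ("c", 3)].into_iter().collect();

    // swap_remove: O(1), but the last entry is moved into the vacated slot.
    let mut swapped = m.clone();
    swapped.swap_remove("a");
    assert_eq!(swapped.keys().copied().collect::<Vec<_>>(), ["c", "b"]);

    // shift_remove: O(n), but the remaining insertion order stays intact.
    let mut shifted = m.clone();
    shifted.shift_remove("a");
    assert_eq!(shifted.keys().copied().collect::<Vec<_>>(), ["b", "c"]);
}
```

For `LiveQueryStore` the stored order is incidental, but wherever the order is consensus-visible, `shift_remove` is the order-preserving choice.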
4 changes: 2 additions & 2 deletions core/src/queue.rs
@@ -1,10 +1,10 @@
//! Module with queue actor
use core::time::Duration;
-use std::collections::HashSet;

use crossbeam_queue::ArrayQueue;
use dashmap::{mapref::entry::Entry, DashMap};
use eyre::{Report, Result};
+use indexmap::IndexSet;
use iroha_config::queue::Configuration;
use iroha_crypto::HashOf;
use iroha_data_model::{account::AccountId, transaction::prelude::*};
@@ -326,7 +326,7 @@ impl Queue {
self.pop_from_queue(&mut seen_queue, wsv, &mut expired_transactions_queue)
});

-let transactions_hashes: HashSet<HashOf<TransactionPayload>> =
+let transactions_hashes: IndexSet<HashOf<TransactionPayload>> =
transactions.iter().map(|tx| tx.payload().hash()).collect();
let txs = txs_from_queue
.filter(|tx| !transactions_hashes.contains(&tx.payload().hash()))
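The hunk above is a plain membership test: collect the hashes of already-selected transactions into a set, then keep only the queued items not in it. The set is only probed with `contains`, so the swap is mainly about satisfying the workspace-wide lint; the deterministic order comes from the queue itself. A toy sketch of the shape (stand-in strings, not Iroha's hash types):

```rust
use indexmap::IndexSet;

fn main() {
    // Hashes of transactions already chosen (stand-ins for HashOf<..>).
    let selected: IndexSet<&str> = ["tx1", "tx3"].into_iter().collect();

    // Queue order is the stable, peer-agreed order; filtering preserves it.
    let queued = ["tx1", "tx2", "tx3", "tx4"];
    let remaining: Vec<&str> = queued
        .into_iter()
        .filter(|tx| !selected.contains(tx))
        .collect();

    assert_eq!(remaining, ["tx2", "tx4"]);
}
```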
33 changes: 17 additions & 16 deletions core/src/smartcontracts/isi/triggers/set.rs
@@ -9,8 +9,9 @@
//! trigger hooks.

use core::cmp::min;
-use std::{collections::HashMap, fmt};
+use std::fmt;

+use indexmap::IndexMap;
use iroha_crypto::HashOf;
use iroha_data_model::{
events::Filter as EventFilter,
@@ -138,17 +139,17 @@ impl<F: Filter + Into<TriggeringFilterBox> + Clone> LoadedActionTrait for Loaded
#[derive(Debug, Default)]
pub struct Set {
/// Triggers using [`DataEventFilter`]
-data_triggers: HashMap<TriggerId, LoadedAction<DataEventFilter>>,
+data_triggers: IndexMap<TriggerId, LoadedAction<DataEventFilter>>,
/// Triggers using [`PipelineEventFilter`]
-pipeline_triggers: HashMap<TriggerId, LoadedAction<PipelineEventFilter>>,
+pipeline_triggers: IndexMap<TriggerId, LoadedAction<PipelineEventFilter>>,
/// Triggers using [`TimeEventFilter`]
-time_triggers: HashMap<TriggerId, LoadedAction<TimeEventFilter>>,
+time_triggers: IndexMap<TriggerId, LoadedAction<TimeEventFilter>>,
/// Triggers using [`ExecuteTriggerEventFilter`]
-by_call_triggers: HashMap<TriggerId, LoadedAction<ExecuteTriggerEventFilter>>,
+by_call_triggers: IndexMap<TriggerId, LoadedAction<ExecuteTriggerEventFilter>>,
/// Trigger ids with type of events they process
-ids: HashMap<TriggerId, TriggeringEventType>,
+ids: IndexMap<TriggerId, TriggeringEventType>,
/// Original [`WasmSmartContract`]s by [`TriggerId`] for querying purposes.
-original_contracts: HashMap<HashOf<WasmSmartContract>, WasmSmartContract>,
+original_contracts: IndexMap<HashOf<WasmSmartContract>, WasmSmartContract>,
/// List of actions that should be triggered by events provided by `handle_*` methods.
/// Vector is used to save the exact triggers order.
matched_ids: Vec<(Event, TriggerId)>,
@@ -157,14 +158,14 @@ pub struct Set {
/// Helper struct for serializing triggers.
struct TriggersWithContext<'s, F> {
/// Triggers being serialized
-triggers: &'s HashMap<TriggerId, LoadedAction<F>>,
+triggers: &'s IndexMap<TriggerId, LoadedAction<F>>,
/// Containing Set, used for looking up original [`WasmSmartContract`]s
/// during serialization.
set: &'s Set,
}

impl<'s, F> TriggersWithContext<'s, F> {
-fn new(triggers: &'s HashMap<TriggerId, LoadedAction<F>>, set: &'s Set) -> Self {
+fn new(triggers: &'s IndexMap<TriggerId, LoadedAction<F>>, set: &'s Set) -> Self {
Self { triggers, set }
}
}
@@ -236,15 +237,15 @@ impl<'de> DeserializeSeed<'de> for WasmSeed<'_, Set> {
while let Some(key) = map.next_key::<String>()? {
match key.as_str() {
"data_triggers" => {
-let triggers: HashMap<TriggerId, Action<DataEventFilter>> =
+let triggers: IndexMap<TriggerId, Action<DataEventFilter>> =
map.next_value()?;
for (id, action) in triggers {
set.add_data_trigger(self.loader.engine, Trigger::new(id, action))
.unwrap();
}
}
"pipeline_triggers" => {
-let triggers: HashMap<TriggerId, Action<PipelineEventFilter>> =
+let triggers: IndexMap<TriggerId, Action<PipelineEventFilter>> =
map.next_value()?;
for (id, action) in triggers {
set.add_pipeline_trigger(
@@ -255,15 +256,15 @@ }
}
}
"time_triggers" => {
-let triggers: HashMap<TriggerId, Action<TimeEventFilter>> =
+let triggers: IndexMap<TriggerId, Action<TimeEventFilter>> =
map.next_value()?;
for (id, action) in triggers {
set.add_time_trigger(self.loader.engine, Trigger::new(id, action))
.unwrap();
}
}
"by_call_triggers" => {
-let triggers: HashMap<TriggerId, Action<ExecuteTriggerEventFilter>> =
+let triggers: IndexMap<TriggerId, Action<ExecuteTriggerEventFilter>> =
map.next_value()?;
for (id, action) in triggers {
set.add_by_call_trigger(
@@ -387,7 +388,7 @@ impl Set {
engine: &wasmtime::Engine,
trigger: Trigger<F>,
event_type: TriggeringEventType,
-map: impl FnOnce(&mut Self) -> &mut HashMap<TriggerId, LoadedAction<F>>,
+map: impl FnOnce(&mut Self) -> &mut IndexMap<TriggerId, LoadedAction<F>>,
) -> Result<bool> {
if self.contains(trigger.id()) {
return Ok(false);
@@ -816,8 +817,8 @@

/// Remove actions with zero execution count from `triggers`
fn remove_zeros<F: Filter>(
-ids: &mut HashMap<TriggerId, TriggeringEventType>,
-triggers: &mut HashMap<TriggerId, LoadedAction<F>>,
+ids: &mut IndexMap<TriggerId, TriggeringEventType>,
+triggers: &mut IndexMap<TriggerId, LoadedAction<F>>,
) {
let to_remove: Vec<TriggerId> = triggers
.iter()
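A note on the deserialization hunks above: `map.next_value()?` now deserializes straight into an `IndexMap`, which requires `indexmap`'s `serde` feature (the `Cargo.toml` hunk shows only `indexmap = "2.1.0"`, so it presumably arrives transitively). The payoff is that a serialize/deserialize round trip preserves entry order, keeping re-emitted snapshots byte-stable. A minimal sketch, using `serde_json` purely for illustration:

```rust
use indexmap::IndexMap;

fn main() -> Result<(), serde_json::Error> {
    let json = r#"{"trigger_b":1,"trigger_a":2}"#;

    // IndexMap keeps the keys in the order they were read...
    let triggers: IndexMap<String, u32> = serde_json::from_str(json)?;

    // ...so serializing again reproduces the input byte-for-byte.
    assert_eq!(serde_json::to_string(&triggers)?, json);
    Ok(())
}
```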
9 changes: 4 additions & 5 deletions core/src/smartcontracts/wasm.rs
@@ -328,9 +328,8 @@ impl LimitsExecutor {
pub mod state {
//! All supported states for [`Runtime`](super::Runtime)

-use std::collections::HashSet;

use derive_more::Constructor;
+use indexmap::IndexSet;

use super::*;

@@ -360,7 +359,7 @@ pub mod state {
pub(super) store_limits: StoreLimits,
/// Span inside of which all logs are recorded for this smart contract
pub(super) log_span: Span,
-pub(super) executed_queries: HashSet<QueryId>,
+pub(super) executed_queries: IndexSet<QueryId>,
/// Borrowed [`WorldStateView`] kind
pub(super) wsv: W,
/// Concrete state for specific executable
@@ -380,14 +379,14 @@
authority,
store_limits: store_limits_from_config(&config),
log_span,
-executed_queries: HashSet::new(),
+executed_queries: IndexSet::new(),
wsv,
specific_state,
}
}

/// Take executed queries leaving an empty set
-pub fn take_executed_queries(&mut self) -> HashSet<QueryId> {
+pub fn take_executed_queries(&mut self) -> IndexSet<QueryId> {
std::mem::take(&mut self.executed_queries)
}
}
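The `take_executed_queries` hunk leans on `std::mem::take`, which swaps in `Default::default()`; `IndexSet` implements `Default` just as `HashSet` does, so the drain-and-reset idiom carries over unchanged. A tiny sketch:

```rust
use indexmap::IndexSet;

fn main() {
    let mut executed: IndexSet<u32> = [1, 2, 3].into_iter().collect();

    // Move the set out, leaving a fresh empty one behind.
    let drained = std::mem::take(&mut executed);

    assert_eq!(drained.len(), 3);
    assert!(executed.is_empty());
}
```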
5 changes: 4 additions & 1 deletion core/src/sumeragi/mod.rs
@@ -110,7 +110,10 @@ impl SumeragiHandle {
pub fn update_metrics(&self) -> Result<()> {
let online_peers_count: u64 = self
.network
-.online_peers(std::collections::HashSet::len)
+.online_peers(
+    #[allow(clippy::disallowed_types)]
+    std::collections::HashSet::len,
+)
.try_into()
.expect("casting usize to u64");

4 changes: 2 additions & 2 deletions core/src/sumeragi/network_topology.rs
@@ -1,7 +1,7 @@
//! Structures formalising the peer topology (e.g. which peers have which predefined roles).
-use std::collections::HashSet;

use derive_more::Display;
+use indexmap::IndexSet;
use iroha_crypto::{PublicKey, SignatureOf};
use iroha_data_model::{block::SignedBlock, prelude::PeerId};
use iroha_logger::trace;
@@ -88,7 +88,7 @@ impl Topology {
roles: &[Role],
signatures: I,
) -> Vec<SignatureOf<T>> {
-let mut public_keys: HashSet<&PublicKey> = HashSet::with_capacity(self.ordered_peers.len());
+let mut public_keys = IndexSet::with_capacity(self.ordered_peers.len());
for role in roles {
match (role, self.is_non_empty(), self.is_consensus_required()) {
(Role::Leader, Some(topology), _) => {