Stable 2.4.8 (#10779)
* ethcore/res: activate atlantis classic hf on block 8772000 (#10766)

* fix docker tags for publishing (#10741)

* merge-backports

* Update version

* remove clique engine from backports

* Reset blockchain properly (#10669)

* delete BlockDetails from COL_EXTRA

* better proofs

* added tests

* PR suggestions

* adds rpc error message for --no-ancient-blocks (#10608)

* adds error message for --no-ancient-blocks, closes #10261

* Apply suggestions from code review

Co-Authored-By: seunlanlege <seunlanlege@gmail.com>

* Treat empty account the same as non-exist accounts in EIP-1052 (#10775)
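
For context on that change: EIP-1052 (`EXTCODEHASH`) specifies that an *empty* account (zero nonce, zero balance, no code, in the EIP-161 sense) must behave exactly like a non-existent account, i.e. the opcode returns zero rather than the hash of empty code. A minimal sketch of that rule, not the actual parity-ethereum implementation (the `Account` type and `extcodehash` helper here are made up for illustration):

```rust
/// 32-byte hash, standing in for the usual H256 type.
type H256 = [u8; 32];

/// Simplified account view used only for this sketch.
struct Account {
    nonce: u64,
    balance: u64,
    code: Vec<u8>,
}

impl Account {
    /// "Empty" in the EIP-161 sense: zero nonce, zero balance, no code.
    fn is_empty(&self) -> bool {
        self.nonce == 0 && self.balance == 0 && self.code.is_empty()
    }
}

/// EXTCODEHASH semantics per EIP-1052: non-existent and empty accounts both
/// yield zero; any other account yields keccak256 of its (possibly empty) code.
fn extcodehash(account: Option<&Account>, keccak256: impl Fn(&[u8]) -> H256) -> H256 {
    match account {
        None => [0u8; 32],
        Some(acc) if acc.is_empty() => [0u8; 32],
        Some(acc) => keccak256(&acc.code),
    }
}
```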

* fix: aura don't add `SystemTime::now()` (#10720)

This commit does the following:
- Prevent overflow in `verify_timestamp()` by not adding `now` to the found faulty timestamp
- Use explicit `CheckedSystemTime::checked_add` to prevent potential consensus issues, because `SystemTime` is platform-dependent
- Remove the `#[cfg(not(time_checked_add))]` conditional compilation
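
The gist of the change, as a rough sketch rather than the exact engine code (the real diff is in the `ethcore/src/engines/authority_round/mod.rs` hunk below; here `std`'s `SystemTime::checked_add` stands in for the `time_utils::CheckedSystemTime` helper):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Before: adding a faulty (huge) timestamp offset to `SystemTime::now()`
/// can overflow; the bare `+` may panic, and the representable range is
/// platform-dependent.
fn found_time_unchecked(found_secs: u64) -> SystemTime {
    SystemTime::now() + Duration::from_secs(found_secs)
}

/// After: anchor the timestamp at UNIX_EPOCH and add with a checked
/// operation, so an out-of-range value becomes an explicit error.
fn found_time_checked(found_secs: u64) -> Result<SystemTime, &'static str> {
    UNIX_EPOCH
        .checked_add(Duration::from_secs(found_secs))
        .ok_or("timestamp overflow")
}
```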

* DevP2p: Get node IP address and udp port from Socket, if not included in PING packet (#10705)

* get node IP address and udp port from Socket, if not included in PING packet
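
A hedged sketch of that fallback (the `PingEndpoint` type and `effective_endpoint` helper are illustrative, not the actual devp2p types): when the PING packet advertises an unspecified address or a zero UDP port, the endpoint observed on the socket is used instead.

```rust
use std::net::{IpAddr, Ipv4Addr, SocketAddr};

/// Illustrative stand-in for the endpoint advertised inside a PING packet.
struct PingEndpoint {
    address: IpAddr,
    udp_port: u16,
}

/// Prefer the endpoint advertised in the PING packet, falling back to the
/// address/port observed on the UDP socket when the packet leaves them unset.
fn effective_endpoint(ping: &PingEndpoint, from: SocketAddr) -> (IpAddr, u16) {
    let address = if ping.address.is_unspecified() { from.ip() } else { ping.address };
    let udp_port = if ping.udp_port == 0 { from.port() } else { ping.udp_port };
    (address, udp_port)
}

fn main() {
    let ping = PingEndpoint { address: IpAddr::V4(Ipv4Addr::UNSPECIFIED), udp_port: 0 };
    let from: SocketAddr = "203.0.113.7:30303".parse().unwrap();
    assert_eq!(effective_endpoint(&ping, from), (from.ip(), 30303));
}
```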

* prevent bootnodes from being added to host nodes

* code corrections

* code corrections

* code corrections

* code corrections

* docs

* code corrections

* code corrections

* Apply suggestions from code review

Co-Authored-By: David <dvdplm@gmail.com>

* Revert "fix: aura don't add `SystemTime::now()` (#10720)"

This reverts commit f104784.

* Add a way to signal shutdown to snapshotting threads (#10744)

* Add a way to signal shutdown to snapshotting threads

* Pass Progress to fat_rlps() so we can abort from there too.

* Checking for abort in a single spot

* Remove nightly-only weak/strong counts

* fix warning

* Fix tests

* Add dummy impl to abort snapshots

* Add another dummy impl for TestSnapshotService

* Remove debugging code

* Return error instead of the odd Ok(())
Switch to AtomicU64

* revert .as_bytes() change

* fix build

* fix build maybe
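
The shutdown signalling described above amounts to a shared atomic flag that `ClientService::shutdown()` raises and that the snapshot worker checks at a single point in its chunking loop, returning an explicit "aborted" error instead of a silent `Ok(())`. A minimal, self-contained sketch of that pattern under assumed names (the real implementation threads the flag through the snapshot `Progress` state in `ethcore/snapshot`):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

#[derive(Debug)]
enum SnapshotError {
    Aborted,
}

/// Shared progress/abort handle, loosely modelled on the Progress struct
/// this PR threads through the chunking code.
#[derive(Default)]
struct Progress {
    abort: AtomicBool,
}

impl Progress {
    fn request_abort(&self) {
        self.abort.store(true, Ordering::SeqCst);
    }
}

/// Chunking loop that checks the abort flag in one spot per iteration and
/// surfaces an explicit error rather than returning Ok(()) when interrupted.
fn take_snapshot(progress: &Progress, chunks: u64) -> Result<(), SnapshotError> {
    for _ in 0..chunks {
        if progress.abort.load(Ordering::SeqCst) {
            return Err(SnapshotError::Aborted);
        }
        // ... produce and write one chunk here ...
    }
    Ok(())
}

fn main() {
    let progress = Arc::new(Progress::default());
    let worker = {
        let progress = Arc::clone(&progress);
        thread::spawn(move || take_snapshot(&progress, u64::max_value()))
    };
    progress.request_abort(); // e.g. from ClientService::shutdown()
    assert!(worker.join().unwrap().is_err());
}
```
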
s3krit authored Jun 25, 2019
1 parent d6c5546 commit 25435c6
Showing 33 changed files with 381 additions and 207 deletions.
12 changes: 6 additions & 6 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -2,7 +2,7 @@
 description = "Parity Ethereum client"
 name = "parity-ethereum"
 # NOTE Make sure to update util/version/Cargo.toml as well
-version = "2.4.7"
+version = "2.4.8"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]

23 changes: 8 additions & 15 deletions ethcore/blockchain/src/blockchain.rs
@@ -668,21 +668,6 @@ impl BlockChain {
 		self.db.key_value().read_with_cache(db::COL_EXTRA, &self.block_details, parent).map_or(false, |d| d.children.contains(hash))
 	}
 
-	/// fetches the list of blocks from best block to n, and n's parent hash
-	/// where n > 0
-	pub fn block_headers_from_best_block(&self, n: u32) -> Option<(Vec<encoded::Header>, H256)> {
-		let mut blocks = Vec::with_capacity(n as usize);
-		let mut hash = self.best_block_hash();
-
-		for _ in 0..n {
-			let current_hash = self.block_header_data(&hash)?;
-			hash = current_hash.parent_hash();
-			blocks.push(current_hash);
-		}
-
-		Some((blocks, hash))
-	}
-
 	/// Returns a tree route between `from` and `to`, which is a tuple of:
 	///
 	/// - a vector of hashes of all blocks, ordered from `from` to `to`.
@@ -869,6 +854,14 @@ impl BlockChain {
 		}
 	}
 
+	/// clears all caches for testing purposes
+	pub fn clear_cache(&self) {
+		self.block_bodies.write().clear();
+		self.block_details.write().clear();
+		self.block_hashes.write().clear();
+		self.block_headers.write().clear();
+	}
+
 	/// Update the best ancient block to the given hash, after checking that
 	/// it's directly linked to the currently known best ancient block
 	pub fn update_best_ancient_block(&self, hash: &H256) {

24 changes: 12 additions & 12 deletions ethcore/res/ethereum/classic.json
@@ -12,7 +12,7 @@
 			"ecip1010PauseTransition": "0x2dc6c0",
 			"ecip1010ContinueTransition": "0x4c4b40",
 			"ecip1017EraRounds": "0x4c4b40",
-			"eip100bTransition": "0x7fffffffffffffff",
+			"eip100bTransition": "0x85d9a0",
 			"bombDefuseTransition": "0x5a06e0"
 		}
 	}
@@ -29,15 +29,15 @@
 		"forkCanonHash": "0x94365e3a8c0b35089c1d1195081fe7489b528a84b22199c916180db8b28ade7f",
 		"eip150Transition": "0x2625a0",
 		"eip160Transition": "0x2dc6c0",
-		"eip161abcTransition": "0x7fffffffffffffff",
-		"eip161dTransition": "0x7fffffffffffffff",
+		"eip161abcTransition": "0x85d9a0",
+		"eip161dTransition": "0x85d9a0",
 		"eip155Transition": "0x2dc6c0",
 		"maxCodeSize": "0x6000",
-		"maxCodeSizeTransition": "0x7fffffffffffffff",
-		"eip140Transition": "0x7fffffffffffffff",
-		"eip211Transition": "0x7fffffffffffffff",
-		"eip214Transition": "0x7fffffffffffffff",
-		"eip658Transition": "0x7fffffffffffffff"
+		"maxCodeSizeTransition": "0x85d9a0",
+		"eip140Transition": "0x85d9a0",
+		"eip211Transition": "0x85d9a0",
+		"eip214Transition": "0x85d9a0",
+		"eip658Transition": "0x85d9a0"
 	},
 	"genesis": {
 		"seal": {
@@ -3905,7 +3905,7 @@
 		"0x0000000000000000000000000000000000000005": {
 			"builtin": {
 				"name": "modexp",
-				"activate_at": "0x7fffffffffffffff",
+				"activate_at": "0x85d9a0",
 				"pricing": {
 					"modexp": {
 						"divisor": 20
@@ -3916,7 +3916,7 @@
 		"0x0000000000000000000000000000000000000006": {
 			"builtin": {
 				"name": "alt_bn128_add",
-				"activate_at": "0x7fffffffffffffff",
+				"activate_at": "0x85d9a0",
 				"pricing": {
 					"linear": {
 						"base": 500,
@@ -3928,7 +3928,7 @@
 		"0x0000000000000000000000000000000000000007": {
 			"builtin": {
 				"name": "alt_bn128_mul",
-				"activate_at": "0x7fffffffffffffff",
+				"activate_at": "0x85d9a0",
 				"pricing": {
 					"linear": {
 						"base": 40000,
@@ -3940,7 +3940,7 @@
 		"0x0000000000000000000000000000000000000008": {
 			"builtin": {
 				"name": "alt_bn128_pairing",
-				"activate_at": "0x7fffffffffffffff",
+				"activate_at": "0x85d9a0",
 				"pricing": {
 					"alt_bn128_pairing": {
 						"base": 100000,

11 changes: 9 additions & 2 deletions ethcore/service/src/service.rs
@@ -30,8 +30,10 @@ use blockchain::{BlockChainDB, BlockChainDBHandler};
 use ethcore::client::{Client, ClientConfig, ChainNotify, ClientIoMessage};
 use ethcore::miner::Miner;
 use ethcore::snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams};
-use ethcore::snapshot::{SnapshotService as _SnapshotService, RestorationStatus};
+use ethcore::snapshot::{SnapshotService as _SnapshotService, RestorationStatus, Error as SnapshotError};
 use ethcore::spec::Spec;
+use ethcore::error::{Error as EthcoreError, ErrorKind};
+
 
 use ethcore_private_tx::{self, Importer, Signer};
 use Error;
@@ -197,6 +199,7 @@ impl ClientService {
 
 	/// Shutdown the Client Service
 	pub fn shutdown(&self) {
+		trace!(target: "shutdown", "Shutting down Client Service");
 		self.snapshot.shutdown();
 	}
 }
@@ -257,7 +260,11 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
 
 		let res = thread::Builder::new().name("Periodic Snapshot".into()).spawn(move || {
 			if let Err(e) = snapshot.take_snapshot(&*client, num) {
-				warn!("Failed to take snapshot at block #{}: {}", num, e);
+				match e {
+					EthcoreError(ErrorKind::Snapshot(SnapshotError::SnapshotAborted), _) => info!("Snapshot aborted"),
+					_ => warn!("Failed to take snapshot at block #{}: {}", num, e),
+				}
+
 			}
 		});
 

94 changes: 65 additions & 29 deletions ethcore/src/client/client.rs
@@ -26,7 +26,7 @@ use bytes::Bytes;
 use call_contract::{CallContract, RegistryInfo};
 use ethcore_miner::pool::VerifiedTransaction;
 use ethcore_miner::service_transaction_checker::ServiceTransactionChecker;
-use ethereum_types::{H256, Address, U256};
+use ethereum_types::{H256, H264, Address, U256};
 use evm::Schedule;
 use hash::keccak;
 use io::IoChannel;
@@ -87,7 +87,7 @@ pub use types::blockchain_info::BlockChainInfo;
 pub use types::block_status::BlockStatus;
 pub use blockchain::CacheSize as BlockChainCacheSize;
 pub use verification::QueueInfo as BlockQueueInfo;
-use db::Writable;
+use db::{Writable, Readable, keys::BlockDetails};
 
 use_contract!(registry, "res/contracts/registrar.json");
 
@@ -772,8 +772,8 @@ impl Client {
 			liveness: AtomicBool::new(awake),
 			mode: Mutex::new(config.mode.clone()),
 			chain: RwLock::new(chain),
-			tracedb: tracedb,
-			engine: engine,
+			tracedb,
+			engine,
 			pruning: config.pruning.clone(),
 			db: RwLock::new(db.clone()),
 			state_db: RwLock::new(state_db),
@@ -786,8 +786,8 @@ impl Client {
 			ancient_blocks_import_lock: Default::default(),
 			queue_consensus_message: IoChannelQueue::new(usize::max_value()),
 			last_hashes: RwLock::new(VecDeque::new()),
-			factories: factories,
-			history: history,
+			factories,
+			history,
 			on_user_defaults_change: Mutex::new(None),
 			registrar_address,
 			exit_handler: Mutex::new(None),
@@ -1146,7 +1146,12 @@ impl Client {
 
 	/// Take a snapshot at the given block.
 	/// If the ID given is "latest", this will default to 1000 blocks behind.
-	pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(&self, writer: W, at: BlockId, p: &snapshot::Progress) -> Result<(), EthcoreError> {
+	pub fn take_snapshot<W: snapshot_io::SnapshotWriter + Send>(
+		&self,
+		writer: W,
+		at: BlockId,
+		p: &snapshot::Progress,
+	) -> Result<(), EthcoreError> {
 		let db = self.state_db.read().journal_db().boxed_clone();
 		let best_block_number = self.chain_info().best_block_number;
 		let block_number = self.block_number(at).ok_or(snapshot::Error::InvalidStartingBlock(at))?;
@@ -1176,8 +1181,16 @@ impl Client {
 		};
 
 		let processing_threads = self.config.snapshot.processing_threads;
-		snapshot::take_snapshot(&*self.engine, &self.chain.read(), start_hash, db.as_hash_db(), writer, p, processing_threads)?;
-
+		let chunker = self.engine.snapshot_components().ok_or(snapshot::Error::SnapshotsUnsupported)?;
+		snapshot::take_snapshot(
+			chunker,
+			&self.chain.read(),
+			start_hash,
+			db.as_hash_db(),
+			writer,
+			p,
+			processing_threads,
+		)?;
 		Ok(())
 	}
 
@@ -1335,37 +1348,60 @@ impl BlockChainReset for Client {
 	fn reset(&self, num: u32) -> Result<(), String> {
 		if num as u64 > self.pruning_history() {
 			return Err("Attempting to reset to block with pruned state".into())
+		} else if num == 0 {
+			return Err("invalid number of blocks to reset".into())
 		}
 
-		let (blocks_to_delete, best_block_hash) = self.chain.read()
-			.block_headers_from_best_block(num)
-			.ok_or("Attempted to reset past genesis block")?;
+		let mut blocks_to_delete = Vec::with_capacity(num as usize);
+		let mut best_block_hash = self.chain.read().best_block_hash();
+		let mut batch = DBTransaction::with_capacity(blocks_to_delete.capacity());
 
-		let mut db_transaction = DBTransaction::with_capacity((num + 1) as usize);
+		for _ in 0..num {
+			let current_header = self.chain.read().block_header_data(&best_block_hash)
+				.expect("best_block_hash was fetched from db; block_header_data should exist in db; qed");
+			best_block_hash = current_header.parent_hash();
+
-		for hash in &blocks_to_delete {
-			db_transaction.delete(::db::COL_HEADERS, &hash.hash());
-			db_transaction.delete(::db::COL_BODIES, &hash.hash());
-			db_transaction.delete(::db::COL_EXTRA, &hash.hash());
+			let (number, hash) = (current_header.number(), current_header.hash());
+			batch.delete(::db::COL_HEADERS, &hash);
+			batch.delete(::db::COL_BODIES, &hash);
+			Writable::delete::<BlockDetails, H264>
+				(&mut batch, ::db::COL_EXTRA, &hash);
 			Writable::delete::<H256, BlockNumberKey>
-				(&mut db_transaction, ::db::COL_EXTRA, &hash.number());
+				(&mut batch, ::db::COL_EXTRA, &number);
+
+			blocks_to_delete.push((number, hash));
 		}
 
+		let hashes = blocks_to_delete.iter().map(|(_, hash)| hash).collect::<Vec<_>>();
+		info!("Deleting block hashes {}",
+			Colour::Red
+				.bold()
+				.paint(format!("{:#?}", hashes))
+		);
+
+		let mut best_block_details = Readable::read::<BlockDetails, H264>(
+			&**self.db.read().key_value(),
+			::db::COL_EXTRA,
+			&best_block_hash
+		).expect("block was previously imported; best_block_details should exist; qed");
+
+		let (_, last_hash) = blocks_to_delete.last()
+			.expect("num is > 0; blocks_to_delete can't be empty; qed");
+		// remove the last block as a child so that it can be re-imported
+		// ethcore/blockchain/src/blockchain.rs/Blockchain::is_known_child()
+		best_block_details.children.retain(|h| *h != *last_hash);
+		batch.write(
+			::db::COL_EXTRA,
+			&best_block_hash,
+			&best_block_details
+		);
 		// update the new best block hash
-		db_transaction.put(::db::COL_EXTRA, b"best", &*best_block_hash);
+		batch.put(::db::COL_EXTRA, b"best", &best_block_hash);
 
 		self.db.read()
			.key_value()
-			.write(db_transaction)
-			.map_err(|err| format!("could not complete reset operation; io error occured: {}", err))?;
-
-		let hashes = blocks_to_delete.iter().map(|b| b.hash()).collect::<Vec<_>>();
-
-		info!("Deleting block hashes {}",
-			Colour::Red
-				.bold()
-				.paint(format!("{:#?}", hashes))
-		);
+			.write(batch)
+			.map_err(|err| format!("could not delete blocks; io error occurred: {}", err))?;
 
 		info!("New best block hash {}", Colour::Green.bold().paint(format!("{:?}", best_block_hash)));
 

14 changes: 6 additions & 8 deletions ethcore/src/engines/authority_round/mod.rs
@@ -22,7 +22,7 @@ use std::iter::FromIterator;
 use std::ops::Deref;
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::sync::{Weak, Arc};
-use std::time::{UNIX_EPOCH, SystemTime, Duration};
+use std::time::{UNIX_EPOCH, Duration};
 
 use block::*;
 use client::EngineClient;
@@ -42,14 +42,12 @@ use itertools::{self, Itertools};
 use rlp::{encode, Decodable, DecoderError, Encodable, RlpStream, Rlp};
 use ethereum_types::{H256, H520, Address, U128, U256};
 use parking_lot::{Mutex, RwLock};
+use time_utils::CheckedSystemTime;
 use types::BlockNumber;
 use types::header::{Header, ExtendedHeader};
 use types::ancestry_action::AncestryAction;
 use unexpected::{Mismatch, OutOfBounds};
 
-#[cfg(not(time_checked_add))]
-use time_utils::CheckedSystemTime;
-
 mod finality;
 
 /// `AuthorityRound` params.
@@ -574,10 +572,10 @@ fn verify_timestamp(step: &Step, header_step: u64) -> Result<(), BlockError> {
 			// Returning it further won't recover the sync process.
 			trace!(target: "engine", "verify_timestamp: block too early");
 
-			let now = SystemTime::now();
-			let found = now.checked_add(Duration::from_secs(oob.found)).ok_or(BlockError::TimestampOverflow)?;
-			let max = oob.max.and_then(|m| now.checked_add(Duration::from_secs(m)));
-			let min = oob.min.and_then(|m| now.checked_add(Duration::from_secs(m)));
+			let found = CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(oob.found))
+				.ok_or(BlockError::TimestampOverflow)?;
+			let max = oob.max.and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m)));
+			let min = oob.min.and_then(|m| CheckedSystemTime::checked_add(UNIX_EPOCH, Duration::from_secs(m)));
 
 			let new_oob = OutOfBounds { min, max, found };
 