tikv: Change KvEngine bounds to Snapshot
Signed-off-by: Brian Anderson <andersrb@gmail.com>
brson committed May 11, 2020
1 parent 6b8e458 commit 0367db3
Showing 3 changed files with 13 additions and 13 deletions.
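For readers skimming the diff: the change narrows the generic bound on the resolver plumbing from a full `KvEngine` to just the `Snapshot` type it actually routes. Below is a minimal, compile-only sketch of the before/after shape, using hypothetical stand-ins for `engine_traits::{KvEngine, Snapshot}` and raftstore's `RaftRouter` rather than the real crate APIs (the `PdClient` bound is also left off for brevity):

```rust
#![allow(dead_code)] // compile-only sketch

use std::marker::PhantomData;

// Hypothetical stand-ins mirroring engine_traits::{KvEngine, Snapshot} and
// raftstore's RaftRouter; not the real crate APIs.
trait Snapshot {}
trait KvEngine {
    type Snapshot: Snapshot;
}

struct RaftRouter<S: Snapshot>(PhantomData<S>);

// Before: the whole engine is a type parameter, even though the runner only
// ever names its snapshot type.
struct RunnerBefore<T, E: KvEngine> {
    pd_client: T,
    router: Option<RaftRouter<E::Snapshot>>,
}

// After: the bound is on the snapshot type that is actually used, and the
// engine type drops out of the signature entirely.
struct RunnerAfter<T, S: Snapshot> {
    pd_client: T,
    router: Option<RaftRouter<S>>,
}

fn main() {} // nothing to run; the type signatures above are the point
```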
2 changes: 1 addition & 1 deletion cmd/src/server.rs
@@ -163,7 +163,7 @@ impl TiKVServer {
let (router, system) = fsm::create_raft_batch_system(&config.raft_store);

let (resolve_worker, resolver, state) =
- resolve::new_resolver::<_, RocksEngine>(Arc::clone(&pd_client), router.clone())
+ resolve::new_resolver(Arc::clone(&pd_client), router.clone())
.unwrap_or_else(|e| fatal!("failed to start address resolver: {}", e));

let mut coprocessor_host = Some(CoprocessorHost::new(router.clone()));
2 changes: 1 addition & 1 deletion components/test_raftstore/src/server.rs
Expand Up @@ -221,7 +221,7 @@ impl Simulator for ServerCluster {

// Create pd client, snapshot manager, server.
let (worker, resolver, state) =
- resolve::new_resolver::<_, RocksEngine>(Arc::clone(&self.pd_client), router.clone()).unwrap();
+ resolve::new_resolver(Arc::clone(&self.pd_client), router.clone()).unwrap();
let snap_mgr = SnapManager::new(tmp_str, Some(router.clone()));
let server_cfg = Arc::new(cfg.server.clone());
let cop_read_pool = ReadPool::from(coprocessor::readpool_impl::build_read_pool_for_test(
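Both call sites above drop the `::<_, RocksEngine>` turbofish. The reason is visible in the `new_resolver` signature change in `src/server/resolve.rs` below: with `E: KvEngine`, the engine appeared only through its associated type (`RaftRouter<E::Snapshot>`), which the compiler cannot invert, so callers had to name `E` explicitly; with `S: Snapshot`, `S` occurs directly in the argument type and is inferred from the router that is passed in. A small sketch of that inference effect, again with hypothetical stand-in types and the `PdClient` parameter simplified away:

```rust
#![allow(dead_code)]

use std::marker::PhantomData;

// Hypothetical stand-ins; not the real engine_traits / raftstore APIs.
trait Snapshot {}
trait KvEngine {
    type Snapshot: Snapshot;
}

struct RaftRouter<S: Snapshot>(PhantomData<S>);

// Old shape: `E` appears only through its associated type, so it cannot be
// inferred from the router argument and callers must write it out.
fn new_resolver_old<E: KvEngine>(_router: RaftRouter<E::Snapshot>) {}

// New shape: `S` appears directly in the argument type, so it is inferred
// from whatever router the caller passes in.
fn new_resolver_new<S: Snapshot>(_router: RaftRouter<S>) {}

struct RocksSnapshot;
impl Snapshot for RocksSnapshot {}

struct RocksEngine;
impl KvEngine for RocksEngine {
    type Snapshot = RocksSnapshot;
}

fn main() {
    // Before: the engine type has to be spelled out at the call site.
    new_resolver_old::<RocksEngine>(RaftRouter::<RocksSnapshot>(PhantomData));
    // After: `S` is inferred as RocksSnapshot from the argument's type.
    new_resolver_new(RaftRouter::<RocksSnapshot>(PhantomData));
}
```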
22 changes: 11 additions & 11 deletions src/server/resolve.rs
Expand Up @@ -4,7 +4,7 @@ use std::fmt::{self, Display, Formatter};
use std::sync::{Arc, Mutex};
use std::time::Instant;

- use engine_traits::KvEngine;
+ use engine_traits::Snapshot;
use kvproto::metapb;
use kvproto::replication_modepb::ReplicationMode;
use pd_client::{take_peer_address, PdClient};
@@ -43,14 +43,14 @@ struct StoreAddr {
}

/// A runner for resolving store addresses.
- struct Runner<T: PdClient, E: KvEngine> {
+ struct Runner<T: PdClient, S: Snapshot> {
pd_client: Arc<T>,
store_addrs: HashMap<u64, StoreAddr>,
state: Arc<Mutex<GlobalReplicationState>>,
- router: Option<RaftRouter<E::Snapshot>>,
+ router: Option<RaftRouter<S>>,
}

- impl<T: PdClient, E: KvEngine> Runner<T, E> {
+ impl<T: PdClient, S: Snapshot> Runner<T, S> {
fn resolve(&mut self, store_id: u64) -> Result<String> {
if let Some(s) = self.store_addrs.get(&store_id) {
let now = Instant::now();
@@ -106,7 +106,7 @@ impl<T: PdClient, E: KvEngine> Runner<T, E> {
}
}

- impl<T: PdClient, E: KvEngine> Runnable<Task> for Runner<T, E> {
+ impl<T: PdClient, S: Snapshot> Runnable<Task> for Runner<T, S> {
fn run(&mut self, task: Task) {
let store_id = task.store_id;
let resp = self.resolve(store_id);
@@ -127,21 +127,21 @@ impl PdStoreAddrResolver {
}

/// Creates a new `PdStoreAddrResolver`.
- pub fn new_resolver<T, E>(
+ pub fn new_resolver<T, S>(
pd_client: Arc<T>,
- router: RaftRouter<E::Snapshot>,
+ router: RaftRouter<S>,
) -> Result<(
Worker<Task>,
PdStoreAddrResolver,
Arc<Mutex<GlobalReplicationState>>,
)>
where
T: PdClient + 'static,
- E: KvEngine,
+ S: Snapshot,
{
let mut worker = Worker::new("addr-resolver");
let state = Arc::new(Mutex::new(GlobalReplicationState::default()));
- let runner = Runner::<_, E> {
+ let runner = Runner {
pd_client,
store_addrs: HashMap::default(),
state: state.clone(),
@@ -170,7 +170,7 @@ mod tests {
use std::thread;
use std::time::{Duration, Instant};

- use engine_rocks::RocksEngine;
+ use engine_rocks::RocksSnapshot;
use kvproto::metapb;
use pd_client::{PdClient, Result};
use tikv_util::collections::HashMap;
@@ -201,7 +201,7 @@ mod tests {
store
}

- fn new_runner(store: metapb::Store) -> Runner<MockPdClient, RocksEngine> {
+ fn new_runner(store: metapb::Store) -> Runner<MockPdClient, RocksSnapshot> {
let client = MockPdClient {
start: Instant::now(),
store,
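In the test module, `Runner` is now pinned to a concrete snapshot type (`RocksSnapshot`) instead of a whole engine, so a test only needs to supply something implementing `Snapshot`. A rough sketch of that test-side instantiation, with a heavily simplified `Runner` and a dummy snapshot type standing in for `engine_rocks::RocksSnapshot`:

```rust
// Hypothetical, simplified stand-ins; DummySnapshot plays the role that
// engine_rocks::RocksSnapshot plays in the real test.
trait Snapshot {}

struct DummySnapshot;
impl Snapshot for DummySnapshot {}

// Simplified Runner: the real one also carries a PdClient, the store address
// cache, and replication state, and wraps the snapshot in RaftRouter<S>.
struct Runner<S: Snapshot> {
    router: Option<S>,
}

fn new_runner() -> Runner<DummySnapshot> {
    // `S` is pinned by the return type, much as the test's new_runner pins
    // Runner<MockPdClient, RocksSnapshot>.
    Runner { router: None }
}

fn main() {
    assert!(new_runner().router.is_none());
}
```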
