Skip to content

Commit

Permalink
move test_unsafe_recovery_create_destroy_reentrancy to integrations
Browse files Browse the repository at this point in the history
Signed-off-by: Neil Shen <overvenus@gmail.com>
  • Loading branch information
overvenus committed Jul 26, 2023
1 parent 4ef1d85 commit 8381b45
Show file tree
Hide file tree
Showing 2 changed files with 88 additions and 88 deletions.
88 changes: 0 additions & 88 deletions tests/failpoints/cases/test_unsafe_recovery.rs
Original file line number Diff line number Diff line change
Expand Up @@ -363,91 +363,3 @@ fn test_unsafe_recovery_demotion_reentrancy() {
assert_eq!(demoted, true);
fail::remove("on_handle_apply_store_1");
}

// Verifies that delivering the same unsafe-recovery plan twice (re-entrancy)
// is handled correctly when the plan both creates a brand-new region and
// tombstones an existing one on the surviving store.
#[test_case(test_raftstore::new_node_cluster)]
#[test_case(test_raftstore_v2::new_node_cluster)]
fn test_unsafe_recovery_create_destroy_reentrancy() {
let mut cluster = new_cluster(0, 3);
cluster.run();
let nodes = Vec::from_iter(cluster.get_node_ids());
assert_eq!(nodes.len(), 3);

let pd_client = Arc::clone(&cluster.pd_client);
// Disable PD's default operators so background scheduling does not
// interfere with the manually driven recovery steps below.
pd_client.disable_default_operator();
let region = block_on(pd_client.get_region_by_id(1)).unwrap().unwrap();

// Makes the leadership definite.
let store2_peer = find_peer(&region, nodes[1]).unwrap().to_owned();
cluster.must_transfer_leader(region.get_id(), store2_peer);
cluster.put(b"random_key1", b"random_val1").unwrap();

// Split the region into 2, and remove one of them, so that we can test both
// region peer list update and region creation.
pd_client.must_split_region(
region,
pdpb::CheckPolicy::Usekey,
vec![b"random_key1".to_vec()],
);
let region1 = pd_client.get_region(b"random_key".as_ref()).unwrap();
let region2 = pd_client.get_region(b"random_key1".as_ref()).unwrap();
let region1_store0_peer = find_peer(&region1, nodes[0]).unwrap().to_owned();
pd_client.must_remove_peer(region1.get_id(), region1_store0_peer);
cluster.must_remove_region(nodes[0], region1.get_id());

// Makes the group lose its quorum.
cluster.stop_node(nodes[1]);
cluster.stop_node(nodes[2]);
{
let put = new_put_cmd(b"k2", b"v2");
let req = new_request(
region2.get_id(),
region2.get_region_epoch().clone(),
vec![put],
true,
);
// majority is lost, can't propose command successfully.
cluster
.call_command_on_leader(req, Duration::from_millis(10))
.unwrap_err();
}

cluster.must_enter_force_leader(region2.get_id(), nodes[0], vec![nodes[1], nodes[2]]);

// Construct recovery plan: create a new region (id 101, single peer 102 on
// the surviving store) and tombstone region2.
let mut plan = pdpb::RecoveryPlan::default();

let mut create = metapb::Region::default();
create.set_id(101);
create.set_end_key(b"random_key1".to_vec());
let mut peer = metapb::Peer::default();
peer.set_id(102);
peer.set_store_id(nodes[0]);
create.mut_peers().push(peer);
plan.mut_creates().push(create);

plan.mut_tombstones().push(region2.get_id());

// Deliver the same plan twice to exercise re-entrancy: the second delivery
// must not corrupt the in-flight create/destroy handling.
pd_client.must_set_unsafe_recovery_plan(nodes[0], plan.clone());
cluster.must_send_store_heartbeat(nodes[0]);
sleep_ms(100);
pd_client.must_set_unsafe_recovery_plan(nodes[0], plan.clone());
cluster.must_send_store_heartbeat(nodes[0]);

// Store reports are sent once the entries are applied.
let mut store_report = None;
for _ in 0..20 {
store_report = pd_client.must_get_store_report(nodes[0]);
if store_report.is_some() {
break;
}
sleep_ms(100);
}
assert_ne!(store_report, None);
// Only the newly created region (101) should be reported: region2 was
// tombstoned by the plan and region1's peer was removed from this store.
let report = store_report.unwrap();
let peer_reports = report.get_peer_reports();
assert_eq!(peer_reports.len(), 1);
let reported_region = peer_reports[0].get_region_state().get_region();
assert_eq!(reported_region.get_id(), 101);
assert_eq!(reported_region.get_peers().len(), 1);
assert_eq!(reported_region.get_peers()[0].get_id(), 102);
}
88 changes: 88 additions & 0 deletions tests/integrations/raftstore/test_unsafe_recovery.rs
Original file line number Diff line number Diff line change
Expand Up @@ -484,6 +484,94 @@ fn test_unsafe_recovery_create_region_reentrancy() {
assert_eq!(created, true);
}

// Verifies that delivering the same unsafe-recovery plan twice (re-entrancy)
// is handled correctly when the plan both creates a brand-new region and
// tombstones an existing one on the surviving store.
#[test_case(test_raftstore::new_node_cluster)]
#[test_case(test_raftstore_v2::new_node_cluster)]
fn test_unsafe_recovery_create_destroy_reentrancy() {
let mut cluster = new_cluster(0, 3);
cluster.run();
let nodes = Vec::from_iter(cluster.get_node_ids());
assert_eq!(nodes.len(), 3);

let pd_client = Arc::clone(&cluster.pd_client);
// Disable PD's default operators so background scheduling does not
// interfere with the manually driven recovery steps below.
pd_client.disable_default_operator();
let region = block_on(pd_client.get_region_by_id(1)).unwrap().unwrap();

// Makes the leadership definite.
let store2_peer = find_peer(&region, nodes[1]).unwrap().to_owned();
cluster.must_transfer_leader(region.get_id(), store2_peer);
cluster.put(b"random_key1", b"random_val1").unwrap();

// Split the region into 2, and remove one of them, so that we can test both
// region peer list update and region creation.
pd_client.must_split_region(
region,
pdpb::CheckPolicy::Usekey,
vec![b"random_key1".to_vec()],
);
let region1 = pd_client.get_region(b"random_key".as_ref()).unwrap();
let region2 = pd_client.get_region(b"random_key1".as_ref()).unwrap();
let region1_store0_peer = find_peer(&region1, nodes[0]).unwrap().to_owned();
pd_client.must_remove_peer(region1.get_id(), region1_store0_peer);
cluster.must_remove_region(nodes[0], region1.get_id());

// Makes the group lose its quorum.
cluster.stop_node(nodes[1]);
cluster.stop_node(nodes[2]);
{
let put = new_put_cmd(b"k2", b"v2");
let req = new_request(
region2.get_id(),
region2.get_region_epoch().clone(),
vec![put],
true,
);
// majority is lost, can't propose command successfully.
cluster
.call_command_on_leader(req, Duration::from_millis(10))
.unwrap_err();
}

cluster.must_enter_force_leader(region2.get_id(), nodes[0], vec![nodes[1], nodes[2]]);

// Construct recovery plan: create a new region (id 101, single peer 102 on
// the surviving store) and tombstone region2.
let mut plan = pdpb::RecoveryPlan::default();

let mut create = metapb::Region::default();
create.set_id(101);
create.set_end_key(b"random_key1".to_vec());
let mut peer = metapb::Peer::default();
peer.set_id(102);
peer.set_store_id(nodes[0]);
create.mut_peers().push(peer);
plan.mut_creates().push(create);

plan.mut_tombstones().push(region2.get_id());

// Deliver the same plan twice to exercise re-entrancy: the second delivery
// must not corrupt the in-flight create/destroy handling.
pd_client.must_set_unsafe_recovery_plan(nodes[0], plan.clone());
cluster.must_send_store_heartbeat(nodes[0]);
sleep_ms(100);
pd_client.must_set_unsafe_recovery_plan(nodes[0], plan.clone());
cluster.must_send_store_heartbeat(nodes[0]);

// Store reports are sent once the entries are applied.
let mut store_report = None;
for _ in 0..20 {
store_report = pd_client.must_get_store_report(nodes[0]);
if store_report.is_some() {
break;
}
sleep_ms(100);
}
assert_ne!(store_report, None);
// Only the newly created region (101) should be reported: region2 was
// tombstoned by the plan and region1's peer was removed from this store.
let report = store_report.unwrap();
let peer_reports = report.get_peer_reports();
assert_eq!(peer_reports.len(), 1);
let reported_region = peer_reports[0].get_region_state().get_region();
assert_eq!(reported_region.get_id(), 101);
assert_eq!(reported_region.get_peers().len(), 1);
assert_eq!(reported_region.get_peers()[0].get_id(), 102);
}

macro_rules! must_get_error_recovery_in_progress {
($cluster:expr, $region:expr, $cmd:expr) => {
let req = new_request(
Expand Down

0 comments on commit 8381b45

Please sign in to comment.