CA-287343: Update HA failure tolerance plan for corosync/GFS2 and add… #3560
Merged: edwintorok merged 1 commit into xapi-project:master from thomasmck:private/thomasmc/max_failures on Apr 17, 2018.
@@ -58,6 +58,7 @@ type pool = {
   master: host;
   slaves: host list;
   ha_host_failures_to_tolerate: int64;
+  cluster: int;
 }

 let string_of_vm {memory; name_label} =
@@ -68,12 +69,13 @@ let string_of_host {memory_total; name_label; vms} =
     memory_total name_label
     (Test_printers.list string_of_vm vms)

-let string_of_pool {master; slaves; ha_host_failures_to_tolerate} =
+let string_of_pool {master; slaves; ha_host_failures_to_tolerate; cluster} =
   Printf.sprintf
-    "{master = %s; slaves = %s; ha_host_failures_to_tolerate = %Ld}"
+    "{master = %s; slaves = %s; ha_host_failures_to_tolerate = %Ld; cluster = %d}"
     (string_of_host master)
     (Test_printers.list string_of_host slaves)
     ha_host_failures_to_tolerate
+    cluster

 let load_vm ~__context ~(vm:vm) ~local_sr ~shared_sr ~local_net ~shared_net =
   let vm_ref = make_vm ~__context
@@ -120,10 +122,9 @@ let load_host ~__context ~host ~local_sr ~shared_sr ~local_net ~shared_net =
   in
   host_ref

-let setup ~__context {master; slaves; ha_host_failures_to_tolerate} =
+let setup ~__context {master; slaves; ha_host_failures_to_tolerate; cluster} =
   let shared_sr = make_sr ~__context ~shared:true () in
   let shared_net = make_network ~__context ~bridge:"xenbr0" () in
-
   (* Remove all hosts added by make_test_database *)
   List.iter (fun host -> Db.Host.destroy ~__context ~self:host) (Db.Host.get_all ~__context);
@@ -149,6 +150,13 @@ let setup ~__context {master; slaves; ha_host_failures_to_tolerate} =
   let master_ref = load_host_and_local_resources master in
   let (_ : API.ref_host list) = List.map load_host_and_local_resources slaves in

+  if cluster > 0 then
+    Test_common.make_cluster_and_cluster_host ~__context () |> ignore;
+  for i = 0 to (cluster - 1) do
+    let host = List.nth (Db.Host.get_all ~__context) i in
+    Test_common.make_cluster_host ~__context ~host () |> ignore;
+  done;
+
   let pool = Db.Pool.get_all ~__context |> List.hd in

   Db.Pool.set_master ~__context ~self:pool ~value:master_ref;
@@ -181,6 +189,7 @@ module AllProtectedVms = Generic.Make(Generic.EncapsulateState(struct
         master = {memory_total = gib 256L; name_label = "master"; vms = []};
         slaves = [];
         ha_host_failures_to_tolerate = 0L;
+        cluster = 0;
       },
       [];
       (* One unprotected VM. *)
@@ -194,6 +203,7 @@ module AllProtectedVms = Generic.Make(Generic.EncapsulateState(struct
         };
         slaves = [];
         ha_host_failures_to_tolerate = 0L;
+        cluster = 0;
       },
       [];
       (* One VM which would be protected if it was running. *)
@@ -204,6 +214,7 @@ module AllProtectedVms = Generic.Make(Generic.EncapsulateState(struct
         };
         slaves = [];
         ha_host_failures_to_tolerate = 0L;
+        cluster = 0;
       },
       [];
       (* One protected VM. *)
@@ -214,6 +225,7 @@ module AllProtectedVms = Generic.Make(Generic.EncapsulateState(struct
         };
         slaves = [];
         ha_host_failures_to_tolerate = 0L;
+        cluster = 0;
       },
       ["vm"];
       (* One protected VM and one unprotected VM. *)
@@ -231,6 +243,7 @@ module AllProtectedVms = Generic.Make(Generic.EncapsulateState(struct
         };
         slaves = [];
         ha_host_failures_to_tolerate = 0L;
+        cluster = 0;
       },
       ["vm1"];
     ]
@@ -270,6 +283,7 @@ module PlanForNFailures = Generic.Make(Generic.EncapsulateState(struct
           {memory_total = gib 256L; name_label = "slave"; vms = []}
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       Xapi_ha_vm_failover.Plan_exists_for_all_VMs
     );
@@ -288,6 +302,7 @@ module PlanForNFailures = Generic.Make(Generic.EncapsulateState(struct
           {memory_total = gib 256L; name_label = "slave"; vms = []}
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       Xapi_ha_vm_failover.Plan_exists_for_all_VMs
     );
@@ -311,6 +326,7 @@ module PlanForNFailures = Generic.Make(Generic.EncapsulateState(struct
           {memory_total = gib 256L; name_label = "slave"; vms = []}
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       Xapi_ha_vm_failover.Plan_exists_for_all_VMs
     );
@@ -346,6 +362,7 @@ module PlanForNFailures = Generic.Make(Generic.EncapsulateState(struct
           }
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       Xapi_ha_vm_failover.No_plan_exists
     );
@@ -415,6 +432,7 @@ module AssertNewVMPreservesHAPlan = Generic.Make(Generic.EncapsulateState(struct
           {memory_total = gib 256L; name_label = "slave"; vms = []}
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       {basic_vm with
         ha_always_run = false;
@@ -445,6 +463,7 @@ module AssertNewVMPreservesHAPlan = Generic.Make(Generic.EncapsulateState(struct
           {memory_total = gib 256L; name_label = "slave"; vms = []}
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       {basic_vm with
         ha_always_run = false;
@@ -483,6 +502,7 @@ module AssertNewVMPreservesHAPlan = Generic.Make(Generic.EncapsulateState(struct
           };
         ];
         ha_host_failures_to_tolerate = 1L;
+        cluster = 0;
       },
       {basic_vm with
         ha_always_run = false;
@@ -495,11 +515,78 @@ module AssertNewVMPreservesHAPlan = Generic.Make(Generic.EncapsulateState(struct
   ]
 end))

+module ComputeMaxFailures = Generic.Make(Generic.EncapsulateState(struct
+  module Io = struct
+    open Xapi_ha_vm_failover
+
+    type input_t = pool
+    type output_t = int
+
+    let string_of_input_t = string_of_pool
+    let string_of_output_t = string_of_int
+  end
+
+  module State = Test_state.XapiDb
+
+  let load_input __context = setup ~__context
+
+  let extract_output __context pool =
+    let max_hosts = Xapi_ha_vm_failover.compute_max_host_failures_to_tolerate ~__context () in
+    (* The functor requires an input_t, but it is not used here *)
+    pool |> ignore;
+    Int64.to_int max_hosts
+
+  let tests = [
+    (* Three-host pool with no VMs. *)
+    (
+      {
+        master = {memory_total = gib 256L; name_label = "master"; vms = []};
+        slaves = [
+          {memory_total = gib 256L; name_label = "slave1"; vms = []};
+          {memory_total = gib 256L; name_label = "slave2"; vms = []}
+        ];
+        (* Placeholder value that is overridden when we call the compute function *)
+        ha_host_failures_to_tolerate = 3L;
+        cluster = 3;
+      },
+      (* Assert that compute_max_host_failures_to_tolerate returns 1 *)
+      1
+    );
+    (* Two-host pool with no VMs. *)
+    (
+      {
+        master = {memory_total = gib 256L; name_label = "master"; vms = []};
+        slaves = [
+          {memory_total = gib 256L; name_label = "slave1"; vms = []}
+        ];
+        ha_host_failures_to_tolerate = 2L;
+        cluster = 2;
+      },
+      (* Assert that compute_max_host_failures_to_tolerate returns 0 *)
+      0
+    );
+    (* Two-host pool with clustering enabled on only one host. *)
+    (
+      {
+        master = {memory_total = gib 256L; name_label = "master"; vms = []};
+        slaves = [
+          {memory_total = gib 256L; name_label = "slave1"; vms = []}
+        ];
+        ha_host_failures_to_tolerate = 2L;
+        cluster = 1;
+      },
+      (* Assert that compute_max_host_failures_to_tolerate returns 1 *)
+      1
+    );
+  ]
+end))
+
 let test =
   "test_ha_vm_failover" >:::
   [
     "test_all_protected_vms" >::: AllProtectedVms.tests;
     "test_plan_for_n_failures" >::: PlanForNFailures.tests;
     "test_assert_new_vm_preserves_ha_plan" >:::
       AssertNewVMPreservesHAPlan.tests;
+    "test_corosync_max_host_failures" >::: ComputeMaxFailures.tests;
   ]
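For intuition, the three new ComputeMaxFailures expectations (three clustered hosts tolerate 1 failure, two clustered hosts tolerate 0, a two-host pool with one clustered host tolerates 1) are consistent with a corosync-style majority quorum over the clustered hosts. The sketch below only illustrates that arithmetic under that assumption; the helper name and formula are hypothetical, not the actual compute_max_host_failures_to_tolerate implementation.

(* Hypothetical sketch: with [pool_size] hosts, of which [cluster_size]
   have clustering enabled, assume a majority of the clustered hosts must
   survive to keep corosync quorate; the rest of the pool may fail.
   This reproduces the three expected outputs above. *)
let max_failures_sketch ~pool_size ~cluster_size =
  let quorum = (cluster_size / 2) + 1 in
  max 0 (pool_size - quorum)

let () =
  assert (max_failures_sketch ~pool_size:3 ~cluster_size:3 = 1);
  assert (max_failures_sketch ~pool_size:2 ~cluster_size:2 = 0);
  assert (max_failures_sketch ~pool_size:2 ~cluster_size:1 = 1)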
Review discussion

On the cluster = 1 case: updated so that cluster now dictates how many hosts to enable clustering on.
On the two-host cases: if your pool only has 2 hosts, how can you lose them all? I think you need some actual VMs for the tests to make sense; indeed, if you have no VMs you might as well turn the whole pool off, but that's not a realistic scenario.
Reply: see the comment above. This number is inserted into the database as part of the setup, but it is overridden when we call the compute function. For 2 hosts we currently expect to tolerate 0 host failures, as per the expected result three lines below.
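(In the diff above, extract_output calls compute_max_host_failures_to_tolerate directly and ignores the seeded ha_host_failures_to_tolerate, which is why that field is described as a placeholder in the test cases.)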