From 4b5a62660c5baeb543239cdcb6aec203fb2fcce9 Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 7 Apr 2017 16:23:29 +0100 Subject: [PATCH 01/59] Language correction. Signed-off-by: Konstantina Chremmou --- ocaml/idl/ocaml_backend/gen_db_actions.ml | 2 +- ocaml/xapi/import.ml | 4 ++-- ocaml/xapi/importexport.ml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index 8fcfec87a3e..4d52508a979 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -448,7 +448,7 @@ let db_action api : O.Module.t = (** Generate a signature for the Server.Make functor. It should have one field per member in the user-facing API (not the special full 'DB api') which has no custom action. The signature will be smaller than the - db_actions signature but the db_actions module will be compatable with it *) + db_actions signature but the db_actions module will be compatible with it *) let make_db_defaults_api = Dm_api.filter (fun _ -> true) (fun _ -> true) (fun x -> not(Gen_empty_custom.operation_requires_side_effect x)) diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index 50cafc5fdc4..65b0deacec9 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -1553,7 +1553,7 @@ let metadata_handler (req: Request.t) s _ = Tar_unix.Archive.skip s (Tar_unix.Header.length * 2); let header = header_of_xmlrpc metadata in - assert_compatable ~__context header.version; + assert_compatible ~__context header.version; if full_restore then assert_can_restore_backup ~__context rpc session_id header; with_error_handling (fun () -> @@ -1597,7 +1597,7 @@ let stream_import __context rpc session_id s content_length refresh_session conf else begin debug "importing new style VM"; let header = header_of_xmlrpc metadata in - assert_compatable ~__context header.version; + assert_compatible ~__context header.version; if config.full_restore 
then assert_can_restore_backup ~__context rpc session_id header; (* objects created here: *) diff --git a/ocaml/xapi/importexport.ml b/ocaml/xapi/importexport.ml index 6d1076329f0..b8701e9b4d7 100644 --- a/ocaml/xapi/importexport.ml +++ b/ocaml/xapi/importexport.ml @@ -79,7 +79,7 @@ let this_version __context = (** Raises an exception if a prospective import cannot be handled by this code. This will get complicated over time... *) -let assert_compatable ~__context other_version = +let assert_compatible ~__context other_version = let this_version = this_version __context in let error() = error "Import version is incompatible"; From b471ed5f11228e0e356a2249478a3fefb38f25fc Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 7 Apr 2017 16:28:01 +0100 Subject: [PATCH 02/59] CA-249786: removed build number comparison from pool join rules. Signed-off-by: Konstantina Chremmou --- ocaml/xapi/xapi_pool.ml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 6b3c20525e5..2f5054d9438 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -245,21 +245,21 @@ let pre_join_checks ~__context ~rpc ~session_id ~force = let master_ref = get_master rpc session_id in let master = Client.Host.get_record ~rpc ~session_id ~self:master_ref in - (* Check software version *) + (* Check software version, but as of CA-249786 don't check the build number*) let get_software_version_fields fields = let open Xapi_globs in begin try List.assoc _platform_version fields with _ -> "" end, begin match get_compatibility_name fields with Some x -> x | None -> "" end, - begin try List.assoc _build_number fields with _ -> "" end, begin try List.assoc _git_id fields with _ -> "" end, begin try if List.mem_assoc linux_pack_vsn_key fields then "installed" else "not present" with _ -> "not present" end in - let print_software_version (version,name,number,id,linux_pack) = - debug "version:%s, name:%s, 
build:%s, id:%s, linux_pack:%s" version name number id linux_pack in + + let print_software_version (version,name,id,linux_pack) = + debug "version:%s, name:%s, id:%s, linux_pack:%s" version name id linux_pack in let master_software_version = master.API.host_software_version in let my_software_version = Db.Host.get_software_version ~__context ~self:me in From 6af6adf0adbad44eca1c6bbf90f490e741922f94 Mon Sep 17 00:00:00 2001 From: Jon Ludlam Date: Tue, 11 Apr 2017 18:05:26 +0100 Subject: [PATCH 03/59] CA-250143: Stat the mirror _before_ removing it Signed-off-by: Jon Ludlam --- ocaml/xapi/xapi_vm_migrate.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index d65ee9dc3eb..7db8156cc2a 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -619,8 +619,8 @@ let vdi_copy_fun __context dbg vdi_map remote is_intra_pool remote_vdis so_far t match mirror_id with | Some mid -> ignore(Storage_access.unregister_mirror mid); - (try SMAPI.DATA.MIRROR.stop ~dbg ~id:mid with _ -> ()); let m = SMAPI.DATA.MIRROR.stat ~dbg ~id:mid in + (try SMAPI.DATA.MIRROR.stop ~dbg ~id:mid with _ -> ()); m.Mirror.failed | None -> false in if mirror_failed then raise (Api_errors.Server_error(Api_errors.mirror_failed,[Ref.string_of vconf.vdi])) From d71e1f6b5338fcf265903ebeb3b37bab0a2c6709 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Wed, 12 Apr 2017 10:56:12 +0100 Subject: [PATCH 04/59] Makefile: add setup.ml prerequisite To targets that need it and don't already depend on it transitively. Now these targets, such as "make all", will work with a clean repo without generated OASIS files. 
Signed-off-by: Gabor Igloi --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 51c94ffb8c1..d32926b3efc 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ doc: setup.data build test: setup.data build $(SETUP) -test $(TESTFLAGS) -all: +all: setup.ml $(SETUP) -all $(ALLFLAGS) uninstall: setup.data @@ -26,10 +26,10 @@ uninstall: setup.data reinstall: setup.data $(SETUP) -reinstall $(REINSTALLFLAGS) -clean: +clean: setup.ml $(SETUP) -clean $(CLEANFLAGS) -distclean: +distclean: setup.ml $(SETUP) -distclean $(DISTCLEANFLAGS) setup.data: setup.ml @@ -38,7 +38,7 @@ setup.data: setup.ml setup.ml: _oasis oasis setup -setup-update dynamic -configure: +configure: setup.ml $(SETUP) -configure $(CONFIGUREFLAGS) .PHONY: build doc test all install uninstall reinstall clean distclean configure From a2891f4a54cb073f69ec356074e50ce4872ff854 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Tue, 28 Mar 2017 11:01:17 +0100 Subject: [PATCH 05/59] CA-242706: call update_getty at xapi startup During xapi startup, the on_dom0_networking_change function does not recognize the IP address change, as it gets set in the database at an earlier startup phase. This commit adds an unconditional call to the update_getty function during startup. 
Signed-off-by: Gabor Igloi --- ocaml/xapi/helpers.ml | 9 +++++++++ ocaml/xapi/xapi.ml | 3 ++- ocaml/xapi/xapi_mgmt_iface.ml | 13 ++----------- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml index cd3cd2a9681..348adf086a3 100644 --- a/ocaml/xapi/helpers.ml +++ b/ocaml/xapi/helpers.ml @@ -185,6 +185,15 @@ let update_pif_address ~__context ~self = with _ -> debug "Bridge %s is not up; not updating IP" bridge +let update_getty () = + (* Running update-issue service on best effort basis *) + try + ignore (Forkhelpers.execute_command_get_output !Xapi_globs.update_issue_script []); + ignore (Forkhelpers.execute_command_get_output !Xapi_globs.kill_process_script ["-q"; "-HUP"; "mingetty"; "agetty"]) + with e -> + debug "update_getty at %s caught exception: %s" + __LOC__ (Printexc.to_string e) + let set_gateway ~__context ~pif ~bridge = let dbg = Context.string_of_task __context in try diff --git a/ocaml/xapi/xapi.ml b/ocaml/xapi/xapi.ml index 1784ae06c92..b2f529b0062 100644 --- a/ocaml/xapi/xapi.ml +++ b/ocaml/xapi/xapi.ml @@ -300,7 +300,8 @@ let bring_up_management_if ~__context () = warn "Failed to acquire a management IP address" end; (* Start the Host Internal Management Network, if needed. 
*) - Xapi_network.check_himn ~__context + Xapi_network.check_himn ~__context; + Helpers.update_getty () with e -> debug "Caught exception bringing up management interface: %s" (ExnHelper.string_of_exn e) diff --git a/ocaml/xapi/xapi_mgmt_iface.ml b/ocaml/xapi/xapi_mgmt_iface.ml index 9f268173f0e..1529dd04113 100644 --- a/ocaml/xapi/xapi_mgmt_iface.ml +++ b/ocaml/xapi/xapi_mgmt_iface.ml @@ -167,15 +167,6 @@ let wait_for_management_ip ~__context = done; end); !ip -let update_getty () = - (* Running update-issue service on best effort basis *) - try - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.update_issue_script []); - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.kill_process_script ["-q"; "-HUP"; "mingetty"; "agetty"]) - with e -> - debug "update_getty at %s caught exception: %s" - __LOC__ (Printexc.to_string e) - let on_dom0_networking_change ~__context = debug "Checking to see if hostname or management IP has changed"; (* Need to update: @@ -198,13 +189,13 @@ let on_dom0_networking_change ~__context = debug "Changing Host.address in database to: %s" ip; Db.Host.set_address ~__context ~self:localhost ~value:ip; debug "Refreshing console URIs"; - update_getty (); + Helpers.update_getty (); Dbsync_master.refresh_console_urls ~__context end | None -> if Db.Host.get_address ~__context ~self:localhost <> "" then begin debug "Changing Host.address in database to: '' (host has no management IP address)"; - update_getty (); + Helpers.update_getty (); Db.Host.set_address ~__context ~self:localhost ~value:"" end end; From e6b6414a41545117ada1b1586fdfadedad849e6f Mon Sep 17 00:00:00 2001 From: Taoyong Ding Date: Thu, 13 Apr 2017 00:17:18 -0700 Subject: [PATCH 06/59] CA-250376: Add protocol option in firewall-port script Signed-off-by: Taoyong Ding --- scripts/plugins/firewall-port | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/scripts/plugins/firewall-port b/scripts/plugins/firewall-port index 
20b527b7575..fbd14fc2c9f 100644 --- a/scripts/plugins/firewall-port +++ b/scripts/plugins/firewall-port @@ -5,17 +5,19 @@ set -e ################################################# -# Use this script to open/close port. +# Use this script to open/close port with specified +# protocol. # # Usage: -# ./firewall-port {open|close} port +# ./firewall-port {open|close} port protocol # ################################################# OP="$1" PORT="$2" +PROTOCOL="${3:-tcp}" CHAIN="xapi-INPUT" -RULE="-p tcp -m conntrack --ctstate NEW -m tcp --dport $PORT -j ACCEPT" +RULE="-p $PROTOCOL -m conntrack --ctstate NEW -m $PROTOCOL --dport $PORT -j ACCEPT" case "${OP}" in open) @@ -38,7 +40,7 @@ case "${OP}" in fi ;; *) - echo $"Usage: $0 {open|close} {port}" 1>&2 + echo $"Usage: $0 {open|close} {port} {protocol}" 1>&2 exit 1 ;; esac From f58b9f38fb225834976ee5c9a38e90c7c18dfa4d Mon Sep 17 00:00:00 2001 From: Jon Ludlam Date: Thu, 13 Apr 2017 15:15:12 +0100 Subject: [PATCH 07/59] CA-171948: Make add_to_map DB calls idempotent Signed-off-by: Jon Ludlam --- ocaml/database/db_cache_types.ml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index e42402336ca..ab111fd2eb2 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -352,8 +352,8 @@ let remove_from_set key t = exception Duplicate let add_to_map key value t = let t = Schema.Value.Unsafe_cast.pairs t in - if List.mem key (List.map fst t) then raise Duplicate; - Schema.Value.Pairs ((key, value) :: t) + if List.mem_assoc key t && List.assoc key t <> value then raise Duplicate; + Schema.Value.Pairs ((key, value) :: List.filter (fun (k, _) -> k <> key) t) let remove_from_map key t = let t = Schema.Value.Unsafe_cast.pairs t in From 5f754cbae0eab41d28be6a8ab107f9b216c98c73 Mon Sep 17 00:00:00 2001 From: Jon Ludlam Date: Thu, 13 Apr 2017 21:58:20 +0100 Subject: [PATCH 08/59] CA-171948: Add a parameter to allow the db 
add_to_map to be idempotent Signed-off-by: Jon Ludlam --- ocaml/database/db_cache_impl.ml | 2 +- ocaml/database/db_cache_types.ml | 4 ++-- ocaml/database/db_cache_types.mli | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ocaml/database/db_cache_impl.ml b/ocaml/database/db_cache_impl.ml index 39e2e048f24..d8ca76456aa 100644 --- a/ocaml/database/db_cache_impl.ml +++ b/ocaml/database/db_cache_impl.ml @@ -257,7 +257,7 @@ let process_structured_field_locked t (key,value) tblname fld objref proc_fn_sel | AddMap -> begin try - add_to_map key value existing_str + add_to_map false key value existing_str with Duplicate -> error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; raise (Duplicate_key (tblname,fld,objref,key)); diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index ab111fd2eb2..4a0adf01571 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -350,9 +350,9 @@ let remove_from_set key t = Schema.Value.Set (List.filter (fun x -> x <> key) t) exception Duplicate -let add_to_map key value t = +let add_to_map idempotent key value t = let t = Schema.Value.Unsafe_cast.pairs t in - if List.mem_assoc key t && List.assoc key t <> value then raise Duplicate; + if List.mem_assoc key t && (not idempotent || List.assoc key t <> value) then raise Duplicate; Schema.Value.Pairs ((key, value) :: List.filter (fun (k, _) -> k <> key) t) let remove_from_map key t = diff --git a/ocaml/database/db_cache_types.mli b/ocaml/database/db_cache_types.mli index 028fa177893..0f0d80c486c 100644 --- a/ocaml/database/db_cache_types.mli +++ b/ocaml/database/db_cache_types.mli @@ -146,7 +146,7 @@ end exception Duplicate val add_to_set : string -> Schema.Value.t -> Schema.Value.t val remove_from_set : string -> Schema.Value.t -> Schema.Value.t -val add_to_map : string -> string -> Schema.Value.t -> Schema.Value.t +val add_to_map : bool -> string -> string -> Schema.Value.t 
-> Schema.Value.t val remove_from_map : string -> Schema.Value.t -> Schema.Value.t val set_field : string -> string -> string -> Schema.Value.t -> Database.t -> Database.t From d5a8900b8293142aa4c2f7d467e59d7cbabdc6e8 Mon Sep 17 00:00:00 2001 From: Jon Ludlam Date: Fri, 14 Apr 2017 12:18:28 +0100 Subject: [PATCH 09/59] CA-171948: Reinstate non-idempotency, with a switch * Xapi code must assume idempotency of 'add_to_' calls * Remote DB calls will _always_ be idempotent * DB calls on the master are controlled by the switch, defaulting to the original behaviour (non idempotent) * API calls follow the master DB behaviour Signed-off-by: Jon Ludlam --- ocaml/database/db_cache_impl.ml | 14 +++++++++-- ocaml/database/db_cache_types.ml | 1 + ocaml/database/db_cache_types.mli | 1 + ocaml/database/db_globs.ml | 3 +++ ocaml/database/db_rpc_common_v1.ml | 5 ++-- ocaml/database/db_rpc_common_v2.ml | 10 ++++++++ ocaml/idl/ocaml_backend/gen_db_actions.ml | 3 +-- ocaml/xapi/test_db_lowlevel.ml | 30 +++++++++++++++++++++++ ocaml/xapi/xapi_globs.ml | 4 +++ 9 files changed, 65 insertions(+), 6 deletions(-) diff --git a/ocaml/database/db_cache_impl.ml b/ocaml/database/db_cache_impl.ml index d8ca76456aa..32774e0156f 100644 --- a/ocaml/database/db_cache_impl.ml +++ b/ocaml/database/db_cache_impl.ml @@ -254,14 +254,24 @@ let process_structured_field_locked t (key,value) tblname fld objref proc_fn_sel let newval = match proc_fn_selector with | AddSet -> add_to_set key existing_str | RemoveSet -> remove_from_set key existing_str - | AddMap -> + | AddMap | AddMapLegacy -> begin try - add_to_map false key value existing_str + (* We use the idempotent map add if we're using the non-legacy + process function, or if the global field 'idempotent_map' has + been set. By default, the Db calls on the master use the + legacy functions, but those on the slave use the new one. 
+ This means xapi code should always assume idempotent_map is + true *) + let idempotent = + (proc_fn_selector = AddMap) || !Db_globs.idempotent_map + in + add_to_map idempotent key value existing_str with Duplicate -> error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; raise (Duplicate_key (tblname,fld,objref,key)); end + | RemoveMap -> remove_from_map key existing_str in write_field_locked t tblname objref fld newval with Not_found -> diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index 4a0adf01571..0110ef15f65 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -500,4 +500,5 @@ type structured_op_t = | RemoveSet | AddMap | RemoveMap + | AddMapLegacy [@@deriving rpc] diff --git a/ocaml/database/db_cache_types.mli b/ocaml/database/db_cache_types.mli index 0f0d80c486c..4c81dcd7d1f 100644 --- a/ocaml/database/db_cache_types.mli +++ b/ocaml/database/db_cache_types.mli @@ -169,5 +169,6 @@ type structured_op_t = | RemoveSet | AddMap | RemoveMap + | AddMapLegacy val structured_op_t_of_rpc: Rpc.t -> structured_op_t val rpc_of_structured_op_t: structured_op_t -> Rpc.t diff --git a/ocaml/database/db_globs.ml b/ocaml/database/db_globs.ml index 50e15c3743d..e99fc110ebf 100644 --- a/ocaml/database/db_globs.ml +++ b/ocaml/database/db_globs.ml @@ -50,6 +50,9 @@ let static_vdis_dir = ref "/etc/xensource/static-vdis" (* Note the following has an equivalent in the xapi layer *) let http_limit_max_rpc_size = 300 * 1024 (* 300K *) +(* add_to_map is idempotent *) +let idempotent_map = ref false + (** Dynamic configurations to be read whenever xapi (re)start *) let permanent_master_failure_retry_interval = ref 60. 
diff --git a/ocaml/database/db_rpc_common_v1.ml b/ocaml/database/db_rpc_common_v1.ml index ce649775009..2d21b7494fb 100644 --- a/ocaml/database/db_rpc_common_v1.ml +++ b/ocaml/database/db_rpc_common_v1.ml @@ -79,7 +79,9 @@ let marshall_structured_op x = AddSet -> "addset" | RemoveSet -> "removeset" | AddMap -> "addmap" - | RemoveMap -> "removemap" in + | RemoveMap -> "removemap" + | AddMapLegacy -> "addmap" (* Nb, we always use 'non-legacy' mode for remote access *) + in XMLRPC.To.string str let unmarshall_structured_op xml = match (XMLRPC.From.string xml) with @@ -311,4 +313,3 @@ let unmarshall_read_records_where_response xml = [ref_xml; rec_xml] -> (XMLRPC.From.string ref_xml, unmarshall_read_record_response rec_xml) | _ -> raise DB_remote_marshall_error) xml_refs_and_recs_list - diff --git a/ocaml/database/db_rpc_common_v2.ml b/ocaml/database/db_rpc_common_v2.ml index cfd6b3c38be..aab47604b2c 100644 --- a/ocaml/database/db_rpc_common_v2.ml +++ b/ocaml/database/db_rpc_common_v2.ml @@ -34,6 +34,16 @@ module Request = struct | Read_records_where of string * Db_filter_types.expr | Process_structured_field of (string * string) * string * string * string * Db_cache_types.structured_op_t [@@deriving rpc] + + (* Make sure the slave only ever uses the idempotent version *) + let rpc_of_t t = + let t' = + match t with + | Process_structured_field (a,b,c,d,Db_cache_types.AddMapLegacy) -> + Process_structured_field (a,b,c,d,Db_cache_types.AddMap) + | x -> x + in + rpc_of_t t' end module Response = struct diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index 4d52508a979..f9242f244c6 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -312,7 +312,7 @@ let db_action api : O.Module.t = (Escaping.escape_id full_name) Client._self | FromField(Add, { DT.ty = DT.Map(_, _); full_name = full_name }) -> - Printf.sprintf "DB.process_structured_field __t (%s,%s) \"%s\" \"%s\" %s 
AddMap" + Printf.sprintf "DB.process_structured_field __t (%s,%s) \"%s\" \"%s\" %s AddMapLegacy" Client._key Client._value (Escaping.escape_obj obj.DT.name) (Escaping.escape_id full_name) @@ -472,4 +472,3 @@ let db_defaults api : O.Signature.t = { O.Signature.name = _db_defaults; elements = List.map (fun x -> O.Signature.Module (obj x)) (Dm_api.objects_of_api api) } - diff --git a/ocaml/xapi/test_db_lowlevel.ml b/ocaml/xapi/test_db_lowlevel.ml index 25e7592343e..c37bbecc49f 100644 --- a/ocaml/xapi/test_db_lowlevel.ml +++ b/ocaml/xapi/test_db_lowlevel.ml @@ -41,8 +41,38 @@ let test_db_get_all_records_race () = let tear_down () = Db_cache_impl.fist_delay_read_records_where := false +let test_idempotent_map () = + Db_globs.idempotent_map := false; + let __context = make_test_database () in + let (vm_ref: API.ref_VM) = make_vm ~__context () in + Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value"; + assert_raises (Db_exn.Duplicate_key ("VM","other_config",(Ref.string_of vm_ref),"test")) + (fun () -> Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value"); + assert_raises (Db_exn.Duplicate_key ("VM","other_config",(Ref.string_of vm_ref),"test")) + (fun () -> Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value2"); + + Db_globs.idempotent_map := true; + let __context = make_test_database () in + let (vm_ref: API.ref_VM) = make_vm ~__context () in + Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value"; + assert_equal (Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value") (); + assert_raises (Db_exn.Duplicate_key ("VM","other_config",(Ref.string_of vm_ref),"test")) + (fun () -> Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value2"); + + Db_globs.idempotent_map := false + +let test_slave_uses_nonlegacy_addmap () = + let operation = Db_cache_types.AddMapLegacy in + let operation' = Db_rpc_common_v1.marshall_structured_op operation |> 
Db_rpc_common_v1.unmarshall_structured_op in + assert_equal operation' Db_cache_types.AddMap; + let operationv2 = Db_rpc_common_v2.Request.Process_structured_field (("",""),"","","",Db_cache_types.AddMapLegacy) in + let operationv2' = Db_rpc_common_v2.Request.(operationv2 |> rpc_of_t |> t_of_rpc) in + assert_equal operationv2' (Db_rpc_common_v2.Request.Process_structured_field (("",""),"","","",Db_cache_types.AddMap)) + let test = "test_db_lowlevel" >::: [ "test_db_get_all_records_race" >:: (bracket id test_db_get_all_records_race tear_down); + "test_db_idempotent_map" >:: test_idempotent_map; + "test_slaves_use_nonlegacy_addmap" >:: test_slave_uses_nonlegacy_addmap; ] diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index c3712a4393a..dd975ba82ee 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -980,6 +980,10 @@ let other_options = [ "modprobe_path", Arg.Set_string modprobe_path, (fun () -> !modprobe_path), "Location of the modprobe(8) command: should match $(which modprobe)"; + + "db_idempotent_map", Arg.Set Db_globs.idempotent_map, + (fun () -> string_of_bool !Db_globs.idempotent_map), "True if the add_to_ API calls should be idempotent"; + ] let all_options = options_of_xapi_globs_spec @ other_options From 6f28f54b03fd30903251b79fe02800ee9b06a15a Mon Sep 17 00:00:00 2001 From: Jon Ludlam Date: Tue, 18 Apr 2017 16:45:16 +0100 Subject: [PATCH 10/59] CA-171948: Add a label for the boolean argument of idempotent Suggested by @lindig in PR#3001 Signed-off-by: Jon Ludlam --- ocaml/database/db_cache_impl.ml | 2 +- ocaml/database/db_cache_types.ml | 2 +- ocaml/database/db_cache_types.mli | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ocaml/database/db_cache_impl.ml b/ocaml/database/db_cache_impl.ml index 32774e0156f..702e5111db4 100644 --- a/ocaml/database/db_cache_impl.ml +++ b/ocaml/database/db_cache_impl.ml @@ -266,7 +266,7 @@ let process_structured_field_locked t (key,value) tblname fld objref proc_fn_sel 
let idempotent = (proc_fn_selector = AddMap) || !Db_globs.idempotent_map in - add_to_map idempotent key value existing_str + add_to_map ~idempotent key value existing_str with Duplicate -> error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; raise (Duplicate_key (tblname,fld,objref,key)); diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index 0110ef15f65..ab3b37fe427 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -350,7 +350,7 @@ let remove_from_set key t = Schema.Value.Set (List.filter (fun x -> x <> key) t) exception Duplicate -let add_to_map idempotent key value t = +let add_to_map ~idempotent key value t = let t = Schema.Value.Unsafe_cast.pairs t in if List.mem_assoc key t && (not idempotent || List.assoc key t <> value) then raise Duplicate; Schema.Value.Pairs ((key, value) :: List.filter (fun (k, _) -> k <> key) t) diff --git a/ocaml/database/db_cache_types.mli b/ocaml/database/db_cache_types.mli index 4c81dcd7d1f..a96849ab010 100644 --- a/ocaml/database/db_cache_types.mli +++ b/ocaml/database/db_cache_types.mli @@ -146,7 +146,7 @@ end exception Duplicate val add_to_set : string -> Schema.Value.t -> Schema.Value.t val remove_from_set : string -> Schema.Value.t -> Schema.Value.t -val add_to_map : bool -> string -> string -> Schema.Value.t -> Schema.Value.t +val add_to_map : idempotent:bool -> string -> string -> Schema.Value.t -> Schema.Value.t val remove_from_map : string -> Schema.Value.t -> Schema.Value.t val set_field : string -> string -> string -> Schema.Value.t -> Database.t -> Database.t From 13f7987ce5dee4bd94b9a9b44beace82e2807f75 Mon Sep 17 00:00:00 2001 From: Jon Ludlam Date: Thu, 20 Apr 2017 17:14:56 +0100 Subject: [PATCH 11/59] CA-249662: Pool_patch handler: If an SR is not specified, use default_SR Signed-off-by: Jon Ludlam --- ocaml/xapi/xapi_pool_patch.ml | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff 
--git a/ocaml/xapi/xapi_pool_patch.ml b/ocaml/xapi/xapi_pool_patch.ml index 80c35c5ec7b..8cd76cb688d 100644 --- a/ocaml/xapi/xapi_pool_patch.ml +++ b/ocaml/xapi/xapi_pool_patch.ml @@ -52,9 +52,21 @@ let pool_patch_upload_handler (req: Http.Request.t) s _ = is to avoid our task being prematurely marked as completed by the import_raw_vdi handler. *) let strip = List.filter (fun (k,v) -> k <> "task_id") in + let add_sr query = + match Importexport.sr_of_req ~__context req with + | Some _ -> query (* There was already an SR specified *) + | None -> + let pool = Db.Pool.get_all ~__context |> List.hd in + let default_SR = Db.Pool.get_default_SR ~__context ~self:pool in + ("sr_id",Ref.string_of default_SR)::query + in let subtask = Client.Task.create rpc session_id "VDI upload" "" in Stdext.Pervasiveext.finally (fun () -> - let req = Http.Request.{req with cookie = strip req.cookie; query = ("task_id",Ref.string_of subtask) :: strip req.query} in + let req = Http.Request.{ + req with + cookie = strip req.cookie; + query = ("task_id",Ref.string_of subtask) :: strip req.query |> add_sr; + } in let vdi_opt = Import_raw_vdi.localhost_handler rpc session_id (Importexport.vdi_of_req ~__context req) req s in match vdi_opt with | Some vdi -> From 56a55a8d1eb92bcad3bb8b016cff5807c2eff80c Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Thu, 20 Apr 2017 09:30:46 +0100 Subject: [PATCH 12/59] CA-250757: Refresh software version after update been applied Signed-off-by: Cheng Zhang --- ocaml/xapi/xapi_pool_update.ml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_pool_update.ml b/ocaml/xapi/xapi_pool_update.ml index 4aaabf47358..69df4501e80 100644 --- a/ocaml/xapi/xapi_pool_update.ml +++ b/ocaml/xapi/xapi_pool_update.ml @@ -468,7 +468,8 @@ let resync_host ~__context ~host = let pool_patch_ref = Xapi_pool_patch.pool_patch_of_update ~__context update_ref in Xapi_pool_patch.write_patch_applied_db ~__context ~self:pool_patch_ref ~host () ) update_refs; - 
Create_misc.create_updates_requiring_reboot_info ~__context ~host + Create_misc.create_updates_requiring_reboot_info ~__context ~host; + Create_misc.create_software_version ~__context end else Db.Host.set_updates ~__context ~self:host ~value:[]; From 30a7392cdceacb847f38b83fc123ac311289fa32 Mon Sep 17 00:00:00 2001 From: Taoyong Ding Date: Fri, 21 Apr 2017 03:29:19 -0700 Subject: [PATCH 13/59] CA-250748: MTU on pif does not always sync to xapi db Fix when pif-plug is called, the mtu is not sync to xapi db. Signed-off-by: Taoyong Ding --- ocaml/xapi/nm.ml | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/ocaml/xapi/nm.ml b/ocaml/xapi/nm.ml index d02c4373ed1..9d95e946781 100644 --- a/ocaml/xapi/nm.ml +++ b/ocaml/xapi/nm.ml @@ -462,18 +462,17 @@ let bring_pif_up ~__context ?(management_interface=false) (pif: API.ref_PIF) = let master = Db.Bond.get_master ~__context ~self:bond in Db.PIF.set_currently_attached ~__context ~self:master ~value:false end; - - (* sync MTU *) - (try - let mtu = Int64.of_int (Net.Interface.get_mtu dbg ~name:bridge) in - Db.PIF.set_MTU ~__context ~self:pif ~value:mtu - with _ -> - debug "could not update MTU field on PIF %s" rc.API.pIF_uuid - ); - Xapi_mgmt_iface.on_dom0_networking_change ~__context - end - ) + end; + + (* sync MTU *) + try + let mtu = Int64.of_int (Net.Interface.get_mtu dbg ~name:bridge) in + if mtu <> rc.API.pIF_MTU then + Db.PIF.set_MTU ~__context ~self:pif ~value:mtu + with _ -> + warn "could not update MTU field on PIF %s" rc.API.pIF_uuid + ) let bring_pif_down ~__context ?(force=false) (pif: API.ref_PIF) = with_local_lock (fun () -> From db21beeae0d91e9befc3472bcdbe5e3c9bb4cf9f Mon Sep 17 00:00:00 2001 From: Cheng Zhang Date: Wed, 26 Apr 2017 02:13:11 +0000 Subject: [PATCH 14/59] CA-251251: Use /var/update/applied/uuid mtime for patch apply time Signed-off-by: Cheng Zhang --- ocaml/xapi/xapi_pool_update.ml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/ocaml/xapi/xapi_pool_update.ml b/ocaml/xapi/xapi_pool_update.ml index 69df4501e80..90835f835d9 100644 --- a/ocaml/xapi/xapi_pool_update.ml +++ b/ocaml/xapi/xapi_pool_update.ml @@ -466,7 +466,9 @@ let resync_host ~__context ~host = List.iter (fun update_ref -> let pool_patch_ref = Xapi_pool_patch.pool_patch_of_update ~__context update_ref in - Xapi_pool_patch.write_patch_applied_db ~__context ~self:pool_patch_ref ~host () + let uuid = Db.Pool_update.get_uuid ~__context ~self:update_ref in + let mtime = (Unix.stat (Filename.concat update_applied_dir uuid)).Unix.st_mtime in + Xapi_pool_patch.write_patch_applied_db ~__context ~date:mtime ~self:pool_patch_ref ~host () ) update_refs; Create_misc.create_updates_requiring_reboot_info ~__context ~host; Create_misc.create_software_version ~__context From 1a4a7cd7f988fb3cfabb1d2abdf7f99821b79928 Mon Sep 17 00:00:00 2001 From: Yang Qian Date: Thu, 27 Apr 2017 21:54:56 +0800 Subject: [PATCH 15/59] CA-250858: Fix potential bug in `wlb_reports.ml` when WLB health check report from WLB response was cut 1. Refactor parser code. Replace hand-written state machine with OCaml XML parser 2. 
Refine the code of `send`, since XML parser has escape the content, there is no need to manually escape the content in `send` Signed-off-by: Yang Qian --- ocaml/xapi/wlb_reports.ml | 144 ++++++++++---------------------------- 1 file changed, 37 insertions(+), 107 deletions(-) diff --git a/ocaml/xapi/wlb_reports.ml b/ocaml/xapi/wlb_reports.ml index b97331c1008..719ca2d5d62 100644 --- a/ocaml/xapi/wlb_reports.ml +++ b/ocaml/xapi/wlb_reports.ml @@ -98,8 +98,8 @@ open Stdext.Xstringext module D = Debug.Make(struct let name="wlb_reports" end) open D -let report_tokens = ("", "") -let diagnostics_tokens = ("", "") +let report_tag = "XmlDataSet" +let diagnostics_tag = "DiagnosticData" let bufsize = 16384 @@ -107,13 +107,8 @@ let hex_entity s = (*debug "hex_entity %s" s; *) char_of_int (int_of_string ("0" ^ (String.sub s 1 (String.length s - 1)))) -let trim_and_send method_name (start_str, end_str) recv_sock send_sock = +let trim_and_send method_name tag recv_sock send_sock = let recv_buf = Buffer.create bufsize in - let send_buf = Buffer.create bufsize in - let recv_state = ref 1 in - let send_state = ref 1 in - let entity = ref "" in - let fill () = let s = String.create bufsize in let n = Unix.read recv_sock s 0 bufsize in @@ -121,106 +116,41 @@ let trim_and_send method_name (start_str, end_str) recv_sock send_sock = Buffer.add_string recv_buf (String.sub s 0 n); n in - + (* Since we use xml parser to parse the reponse message, we don't need to escape the xml content in `send` *) let send s = - let s_len = String.length s in - let rec send' i = - let c = s.[i] in - (* debug "%c" c; *) - if !send_state = 1 then - begin - if c = '&' then - send_state := 2 - else - Buffer.add_char send_buf c - end - else - begin - if c = ';' then - let e = !entity in - Buffer.add_char send_buf - (if e = "lt" then - '<' - else if e = "gt" then - '>' - else if e = "amp" then - '&' - else if e = "apos" then - '\'' - else if e = "quot" then - '"' - else - hex_entity e); - send_state := 1; - 
entity := "" - else - entity := !entity ^ (String.of_char c) - end; - if i < s_len - 1 then - send' (i + 1) - else - () - in - send' 0; - ignore (Unix.write send_sock (Buffer.contents send_buf) 0 - (Buffer.length send_buf)); - Buffer.clear send_buf + ignore (Unix.write send_sock s 0 (String.length s)) in - - let rec pump () = + let rec recv_all ()= let n = fill() in - if Buffer.length recv_buf > 0 then - begin - let s = Buffer.contents recv_buf in - (* debug "%s %d" s !recv_state; *) - if !recv_state = 1 then - match String.find_all start_str s with - | n :: _ -> - Buffer.clear recv_buf; - let i = n + String.length start_str in - Buffer.add_substring recv_buf s i (String.length s - i); - recv_state := 2 - | [] -> - () - else if !recv_state = 2 then - match String.find_all end_str s with - | n :: _ -> - send (String.sub s 0 n); - Buffer.clear recv_buf; - recv_state := 3 - | [] -> - send s; - Buffer.clear recv_buf - else - Buffer.clear recv_buf; - if n > 0 then - pump() - else if !recv_state != 3 then - (* if in state 1 we are still looking for the opening tag of the data set, expect xml to be valid - if in state 2 we are still looking for the closing tag of the data set, expect xml to be truncated *) - let rec_data = (Buffer.contents recv_buf) in - if !recv_state = 1 then - begin - try - let xml_data = Xml.parse_string rec_data in - Workload_balancing.parse_result_code - method_name - (Workload_balancing.retrieve_inner_xml method_name xml_data true) - "Failed to detect end of XML, data could be truncated" - rec_data - true - with - | Xml.Error err -> - Workload_balancing.raise_malformed_response' method_name (Xml.error err) rec_data - end - else - Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." 
rec_data - end + if n > 0 then + recv_all() + else + () in - pump() - - -let handle req bio method_name tokens (method_name, request_func) = + recv_all(); + let s = Buffer.contents recv_buf in + debug "receive len: %d, content: %s" (String.length s) s; + try + let xml_data = Xml.parse_string s in + let report_result_xml = Workload_balancing.retrieve_inner_xml method_name xml_data true in + try + let xml_data_set_content = Workload_balancing.data_from_leaf (Workload_balancing.descend_and_match [tag] report_result_xml) in + debug "send conent: %s" xml_data_set_content; + send xml_data_set_content + with + | Workload_balancing.Xml_parse_failure error -> + Workload_balancing.parse_result_code + method_name + report_result_xml + "Failed to detect end of XML, data could be truncated" + s + true + with + | Xml.Error err -> + Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." s + + +let handle req bio method_name tag (method_name, request_func) = let client_sock = Buf_io.fd_of bio in Buf_io.assert_buffer_empty bio; debug "handle: fd = %d" (Stdext.Unixext.int_of_file_descr client_sock); @@ -236,7 +166,7 @@ let handle req bio method_name tokens (method_name, request_func) = let parse response wlb_sock = Http_svr.headers client_sock (Http.http_200_ok ()); - trim_and_send method_name tokens wlb_sock client_sock + trim_and_send method_name tag wlb_sock client_sock in try request_func ~__context ~handler:parse @@ -267,11 +197,11 @@ let report_handler (req: Request.t) (bio: Buf_io.t) _ = not (List.mem k ["session_id"; "task_id"; "report"])) req.Request.query in - handle req bio "ExecuteReport" report_tokens + handle req bio "ExecuteReport" report_tag (Workload_balancing.wlb_report_request report params) (* GET /wlb_diagnostics?session_id=&task_id= *) let diagnostics_handler (req: Request.t) (bio: Buf_io.t) _ = - handle req bio "GetDiagnostics" diagnostics_tokens + handle req bio "GetDiagnostics" diagnostics_tag 
Workload_balancing.wlb_diagnostics_request From 119f0fa56d5ede0a78202abe86d323ce70506dc3 Mon Sep 17 00:00:00 2001 From: Andrew Cooper Date: Fri, 28 Apr 2017 15:23:30 +0100 Subject: [PATCH 16/59] End the temporary yum.conf file with a newline Signed-off-by: Andrew Cooper --- ocaml/xapi/xapi_pool_update.ml | 1 + 1 file changed, 1 insertion(+) diff --git a/ocaml/xapi/xapi_pool_update.ml b/ocaml/xapi/xapi_pool_update.ml index 90835f835d9..72aa7aff7d6 100644 --- a/ocaml/xapi/xapi_pool_update.ml +++ b/ocaml/xapi/xapi_pool_update.ml @@ -190,6 +190,7 @@ let create_yum_config ~__context ~self ~url = ; Printf.sprintf "name=%s" name_label ; Printf.sprintf "baseurl=%s" url ; if signed then Printf.sprintf ("gpgkey=file:///etc/pki/rpm-gpg/%s") key else "" + ; "" (* Newline at the end of the file *) ] let attach_helper ~__context ~uuid ~vdi = From b2dbcfb632fbc3b40a314d65128f4a9cc4816697 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Wed, 3 May 2017 14:01:27 +0100 Subject: [PATCH 17/59] Remove old SM backends Signed-off-by: Gabor Igloi --- ocaml/xapi/sm-back-file | 137 -------------------------------------- ocaml/xapi/sm-back-lvm | 144 ---------------------------------------- 2 files changed, 281 deletions(-) delete mode 100755 ocaml/xapi/sm-back-file delete mode 100644 ocaml/xapi/sm-back-lvm diff --git a/ocaml/xapi/sm-back-file b/ocaml/xapi/sm-back-file deleted file mode 100755 index 3dc87f304c8..00000000000 --- a/ocaml/xapi/sm-back-file +++ /dev/null @@ -1,137 +0,0 @@ -#!/bin/sh -# Copyright (c) 2006 XenSource Inc. 
-# Author: Vincent Hanquez -# -# storage manager backend: qcow operations -# - -check_arg_ge() { - if [ "$1" -lt "$2" ]; then exit 3; fi; -} - -check_arg_eq() { - if [ "$1" -ne "$2" ]; then exit 3; fi; -} - -sr_create() { - sruuid=$1 - shift -} - -sr_delete() { - sruuid=$1 - exit 2 -} - -sr_attach() { - sruuid=$1 - mkdir -p "/SR-${sruuid}" - mkdir -p "/SR-${sruuid}/images" -} - -sr_detach() { - sruuid=$1 - rm -rf "/SR-${sruuid}" -} - -vdi_create() { - sruuid=$1 - vdiuuid=$2 - size="$3k" - srname="/var/xensource/SR-${sruuid}" - vdiname="vdi-${vdiuuid}" - - # FIXME: count is wrong here - dd if=/dev/zero of=${srname}/${vdiname} count=${size} -} - -vdi_delete() { - sruuid=$1 - vdiuuid=$2 - srname="/var/xensource/SR-${sruuid}" - vdiname="vdi-${vdiuuid}" - rm ${srname}/${vdiname} -} - -vdi_attach() { - sruuid=$1 - vdiuuid=$2 - srname="/var/xensource/SR-${sruuid}" - vdiname="vdi-${vdiuuid}" - - ln -f -s "${srname}/${vdiname}" "/SR-${sruuid}/images/${vdiuuid}" -} - -vdi_detach() { - sruuid=$1 - vdiuuid=$2 - - rm -f "/SR-${sruuid}/images/${vdiuuid}" -} - -vdi_clone() { - sruuid=$1 - vdiuuid=$2 - dvdiuuid=$3 - srname="/var/xensource/SR-${sruuid}" - vdiname="vdi-${vdiuuid}" - dvdiname="vdi-${dvdiuuid}" - - cp "${srname}/${vdiname}" "${srname}/${dvdiname}" -} - -vdi_resize() { - sruuid=$1 - vdiuuid=$2 - newsize=$3 - - exit 0 -} - -cmd=$1 -shift -case "$cmd" in -sr_create) - check_arg_ge $# 2 - sr_create $* - ;; -sr_delete) - check_arg_eq $# 1 - sr_delete $* - ;; -sr_attach) - check_arg_eq $# 1 - sr_attach $* - ;; -sr_detach) - check_arg_eq $# 1 - sr_detach $* - ;; -vdi_create) - check_arg_eq $# 3 - vdi_create $* - ;; -vdi_delete) - check_arg_eq $# 2 - vdi_delete $* - ;; -vdi_attach) - check_arg_eq $# 2 - vdi_attach $* - ;; -vdi_detach) - check_arg_eq $# 2 - vdi_detach $* - ;; -vdi_clone) - check_arg_eq $# 3 - vdi_clone $* - ;; -vdi_resize) - check_arg_eq $# 3 - vdi_resize $* - ;; -*) - exit 1 -esac -exit $? 
diff --git a/ocaml/xapi/sm-back-lvm b/ocaml/xapi/sm-back-lvm deleted file mode 100644 index f99ab42e7be..00000000000 --- a/ocaml/xapi/sm-back-lvm +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/sh -# Copyright (c) 2006 XenSource Inc. -# Author: Vincent Hanquez -# -# storage manager example backend: lvm operations -# - -check_arg_ge() { - if [ "$1" -lt "$2" ]; then exit 3; fi; -} - -check_arg_eq() { - if [ "$1" -ne "$2" ]; then exit 3; fi; -} - -sr_create() { - sruuid=$1 - vgname="VG_XenStorage-${sruuid}" - shift - - vgcreate ${vgname} $* - vgs --separator : --noheadings --units k ${vgname} | cut -f 5,6 -d: | \ - sed -e 's/:/ /' -} - -sr_delete() { - sruuid=$1 - exit 2 -} - -sr_attach() { - sruuid=$1 - mkdir -p "/SR-${sruuid}" - mkdir -p "/SR-${sruuid}/images" -} - -sr_detach() { - sruuid=$1 - rm -rf "/SR-${sruuid}" -} - -vdi_create() { - sruuid=$1 - vdiuuid=$2 - size="$3k" - vgname="VG_XenStorage-${sruuid}" - vdiname="LV-${vdiuuid}" - lvcreate -L${size} -n"${vdiname}" ${vgname} -} - -vdi_delete() { - sruuid=$1 - vdiuuid=$2 - vgname="VG_XenStorage-${sruuid}" - vdiname="LV-${vdiuuid}" - lvremove -f "/dev/${vgname}/${vdiname}" -} - -vdi_attach() { - sruuid=$1 - vdiuuid=$2 - - ln -f -s "/dev/VG_XenStorage-${sruuid}/LV-${vdiuuid}" \ - "/SR-${sruuid}/images/${vdiuuid}" -} - -vdi_detach() { - sruuid=$1 - vdiuuid=$2 - - rm -f "/SR-${sruuid}/images/${vdiuuid}" -} - -vdi_clone() { - sruuid=$1 - vdiuuid=$2 - dvdiuuid=$3 - vgname="VG_XenStorage-${sruuid}" - - size=$(lvs --separator : --noheadings --units k "${vgname}/LV-${vdiuuid}" \ - | cut -d: -f 3) - lvcreate -L${size} -n"LV-${dvdiuuid}" ${vgname} - if [ $? -ne 0 ]; then exit $?; fi - - dd if="/dev/${vgname}/LV-${vdiuuid}" of="/dev/${vgname}/LV-${dvdiuuid}" - if [ $? 
-ne 0 ]; then exit $?; fi -} - -vdi_resize() { - sruuid=$1 - vdiuuid=$2 - newsize=$3 - vgname="VG_XenStorage-${sruuid}" - - lvresize -L${newsize} "${vgname}/LV-${vdiuuid}" -} - -cmd=$1 -shift -case "$cmd" in -sr_create) - check_arg_ge $# 2 - sr_create $* - ;; -sr_delete) - check_arg_eq $# 1 - sr_delete $* - ;; -sr_attach) - check_arg_eq $# 1 - sr_attach $* - ;; -sr_detach) - check_arg_eq $# 1 - sr_detach $* - ;; -vdi_create) - check_arg_eq $# 3 - vdi_create $* - ;; -vdi_delete) - check_arg_eq $# 2 - vdi_delete $* - ;; -vdi_attach) - check_arg_eq $# 2 - vdi_attach $* - ;; -vdi_detach) - check_arg_eq $# 2 - vdi_detach $* - ;; -vdi_clone) - check_arg_eq $# 3 - vdi_clone $* - ;; -vdi_resize) - check_arg_eq $# 3 - vdi_resize $* - ;; -*) - exit 1 -esac -exit $? From 3c0595024693754dc5c359c7e023d2e28b79ff2b Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Wed, 3 May 2017 14:07:15 +0100 Subject: [PATCH 18/59] Remove unused, obsolete storage_proxy.ml Storage_proxy has been obsoleted by commit b0dba67 Signed-off-by: Gabor Igloi --- ocaml/xapi/storage_proxy.ml | 112 ------------------------------------ 1 file changed, 112 deletions(-) delete mode 100644 ocaml/xapi/storage_proxy.ml diff --git a/ocaml/xapi/storage_proxy.ml b/ocaml/xapi/storage_proxy.ml deleted file mode 100644 index 6121f83f7e3..00000000000 --- a/ocaml/xapi/storage_proxy.ml +++ /dev/null @@ -1,112 +0,0 @@ -(* - * Copyright (C) 2011 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- *) - -(* This file should be auto-generated from storage_interface. - Corrollary: don't add anything which can't be auto-generated from storage_interface! *) - -open Storage_interface - -module type RPC = sig - val rpc : Rpc.call -> Rpc.response -end - -module Proxy = functor(RPC: RPC) -> struct - type context = Smint.request - - module Client = Client(RPC) - - module Query = struct - let query _ = Client.Query.query - let diagnostics _ = Client.Query.diagnostics - end - module DP = struct - let create _ = Client.DP.create - let destroy _ = Client.DP.destroy - let diagnostics _ = Client.DP.diagnostics - let attach_info _ = Client.DP.attach_info - let stat_vdi _ = Client.DP.stat_vdi - end - module SR = struct - let probe _ = Client.SR.probe - let create _ = Client.SR.create - let set_name_label _ = Client.SR.set_name_label - let set_name_description _ = Client.SR.set_name_description - let attach _ = Client.SR.attach - let detach _ = Client.SR.detach - let reset _ = Client.SR.reset - let destroy _ = Client.SR.destroy - let scan _ = Client.SR.scan - let stat _ = Client.SR.stat - let list _ = Client.SR.list - let update_snapshot_info_src _ = Client.SR.update_snapshot_info_src - let update_snapshot_info_dest _ = Client.SR.update_snapshot_info_dest - end - module VDI = struct - let epoch_begin _ = Client.VDI.epoch_begin - let attach _ = Client.VDI.attach - let activate _ = Client.VDI.activate - let deactivate _ = Client.VDI.deactivate - let detach _ = Client.VDI.detach - let epoch_end _ = Client.VDI.epoch_end - - let create _ = Client.VDI.create - let set_name_label _ = Client.VDI.set_name_label - let set_name_description _ = Client.VDI.set_name_description - let snapshot _ = Client.VDI.snapshot - let clone _ = Client.VDI.clone - let destroy _ = Client.VDI.destroy - let resize _ = Client.VDI.resize - let stat _ = Client.VDI.stat - let introduce _ = Client.VDI.introduce - let set_persistent _ = Client.VDI.set_persistent - let get_by_name _ = Client.VDI.get_by_name - 
let set_content_id _ = Client.VDI.set_content_id - let similar_content _ = Client.VDI.similar_content - let compose _ = Client.VDI.compose - let add_to_sm_config _ = Client.VDI.add_to_sm_config - let remove_from_sm_config _ = Client.VDI.remove_from_sm_config - let get_url _ = Client.VDI.get_url - end - - let get_by_name _ = Client.get_by_name - - module Policy = struct - let get_backend_vm _ = Client.Policy.get_backend_vm - end - - module DATA = struct - let copy_into _ = Client.DATA.copy_into - let copy _ = Client.DATA.copy - module MIRROR = struct - let start _ = Client.DATA.MIRROR.start - let stop _ = Client.DATA.MIRROR.stop - let stat _ = Client.DATA.MIRROR.stat - let receive_start _ = Client.DATA.MIRROR.receive_start - let receive_finalize _ = Client.DATA.MIRROR.receive_finalize - let receive_cancel _ = Client.DATA.MIRROR.receive_cancel - let list _ = Client.DATA.MIRROR.list - end - end - - module TASK = struct - let stat _ = Client.TASK.stat - let cancel _ = Client.TASK.cancel - let destroy _ = Client.TASK.destroy - let list _ = Client.TASK.list - end - - module UPDATES = struct - let get _ = Client.UPDATES.get - end -end From 749e763a6aa0250ed5e5d464f6e8ff79922edbe6 Mon Sep 17 00:00:00 2001 From: Sharad Yadav Date: Fri, 28 Apr 2017 09:44:19 +0100 Subject: [PATCH 19/59] CA-249668: Add new API error `TLS_CONNECTION_FAILED` Add new API error `TLS_CONNECTION_FAILED` for TLS connection failure on the specified address and port. Signed-off-by: Sharad Yadav --- ocaml/idl/datamodel.ml | 3 +++ ocaml/xapi-consts/api_errors.ml | 1 + 2 files changed, 4 insertions(+) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 5ffa328b94a..34046ee9326 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -505,6 +505,9 @@ let _ = error Api_errors.cannot_contact_host ["host"] ~doc:"Cannot forward messages because the host cannot be contacted. The host may be switched off or there may be network connectivity problems." 
(); + error Api_errors.tls_connection_failed ["address"; "port"] + ~doc:"Cannot contact the other host using TLS on the specified address and port" (); + error Api_errors.uuid_invalid [ "type"; "uuid" ] ~doc:"The uuid you supplied was invalid." (); error Api_errors.object_nolonger_exists [] diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml index 5639c7c86f4..f540b94a1bc 100644 --- a/ocaml/xapi-consts/api_errors.ml +++ b/ocaml/xapi-consts/api_errors.ml @@ -42,6 +42,7 @@ let session_invalid = "SESSION_INVALID" let change_password_rejected = "CHANGE_PASSWORD_REJECTED" let user_is_not_local_superuser = "USER_IS_NOT_LOCAL_SUPERUSER" let cannot_contact_host = "CANNOT_CONTACT_HOST" +let tls_connection_failed = "TLS_CONNECTION_FAILED" let not_supported_during_upgrade = "NOT_SUPPORTED_DURING_UPGRADE" let handle_invalid = "HANDLE_INVALID" From 96c21c5635fb86c28f82e02629a50d02092f6edf Mon Sep 17 00:00:00 2001 From: Sharad Yadav Date: Fri, 28 Apr 2017 11:28:40 +0100 Subject: [PATCH 20/59] CA-249668: Raise an API error `tls_connection_failed` on TLS connection failure. Copying VM metadata to remote Host can fail with internal error `Xmlrpc_client.Stunnel_connection_failed`, if there is a TLS connection failure on specified remote address. Raise an API error `tls_connection_failed` instead of internal error. Internally fixes SXM error handling: If VM migration performed on remote network which got TLS connection failure from source host, currently fails with internal error. It will allow SXM to fail with API error `tls_connection_failed`. 
Signed-off-by: Sharad Yadav --- ocaml/xapi/importexport.ml | 42 +++++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/ocaml/xapi/importexport.ml b/ocaml/xapi/importexport.ml index b8701e9b4d7..fcdecac4449 100644 --- a/ocaml/xapi/importexport.ml +++ b/ocaml/xapi/importexport.ml @@ -257,27 +257,31 @@ let remote_metadata_export_import ~__context ~rpc ~session_id ~remote_address ~r ] ~keep_alive:false Http.Put remote_import_request in debug "Piping HTTP %s to %s" (Http.Request.to_string get) (Http.Request.to_string put); - with_transport (Unix Xapi_globs.unix_domain_socket) - (with_http get - (fun (r, ifd) -> - debug "Content-length: %s" (Stdext.Opt.default "None" (Stdext.Opt.map Int64.to_string r.Http.Response.content_length)); - let put = { put with Http.Request.content_length = r.Http.Response.content_length } in - debug "Connecting to %s:%d" remote_address !Xapi_globs.https_port; - (* Spawn a cached stunnel instance. Otherwise, once metadata tranmission completes, the connection - between local xapi and stunnel will be closed immediately, and the new spawned stunnel instance - will be revoked, this might cause the remote stunnel gets partial metadata xml file, and the - ripple effect is that remote xapi fails to parse metadata xml file. Using a cached stunnel can - not always avoid the problem since any cached stunnel entry might be evicted. 
However, it is - unlikely to happen in practice because the cache size is large enough.*) - with_transport (SSL (SSL.make ~use_stunnel_cache:true (), remote_address, !Xapi_globs.https_port)) - (with_http put - (fun (_, ofd) -> + begin try + with_transport (Unix Xapi_globs.unix_domain_socket) + (with_http get + (fun (r, ifd) -> + debug "Content-length: %s" (Stdext.Opt.default "None" (Stdext.Opt.map Int64.to_string r.Http.Response.content_length)); + let put = { put with Http.Request.content_length = r.Http.Response.content_length } in + debug "Connecting to %s:%d" remote_address !Xapi_globs.https_port; + (* Spawn a cached stunnel instance. Otherwise, once metadata tranmission completes, the connection + between local xapi and stunnel will be closed immediately, and the new spawned stunnel instance + will be revoked, this might cause the remote stunnel gets partial metadata xml file, and the + ripple effect is that remote xapi fails to parse metadata xml file. Using a cached stunnel can + not always avoid the problem since any cached stunnel entry might be evicted. 
However, it is + unlikely to happen in practice because the cache size is large enough.*) + with_transport (SSL (SSL.make ~use_stunnel_cache:true (), remote_address, !Xapi_globs.https_port)) + (with_http put + (fun (_, ofd) -> let (n: int64) = Stdext.Unixext.copy_file ?limit:r.Http.Response.content_length ifd ofd in debug "Written %Ld bytes" n - ) - ) - ) - ); + ) + ) + ) + ) + with Xmlrpc_client.Stunnel_connection_failed -> + raise (Api_errors.Server_error(Api_errors.tls_connection_failed, [remote_address; (string_of_int !Xapi_globs.https_port)])) + end; (* Wait for remote task to succeed or fail *) Cli_util.wait_for_task_completion rpc session_id remote_task; match Client.Task.get_status rpc session_id remote_task with From 9713bdc15934b99e400f6233fe15fffe519460c1 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Thu, 4 May 2017 11:30:33 +0100 Subject: [PATCH 21/59] Storage_access: fix task names to match called op Some of the task names did not match the names of the invoked SMAPIv2 operation. 
Signed-off-by: Gabor Igloi --- ocaml/xapi/storage_access.ml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ocaml/xapi/storage_access.ml b/ocaml/xapi/storage_access.ml index 13ee8fe11e9..6492f177191 100644 --- a/ocaml/xapi/storage_access.ml +++ b/ocaml/xapi/storage_access.ml @@ -179,7 +179,7 @@ module SMAPIv1 = struct String.sub queue (i + 1) (String.length queue -i - 1) with Not_found -> queue in - Server_helpers.exec_with_new_task "SR.create" ~subtask_of:(Ref.of_string dbg) + Server_helpers.exec_with_new_task "SR.probe" ~subtask_of:(Ref.of_string dbg) (fun __context -> let task = Context.get_task_id __context in Storage_interface.Raw (Sm.sr_probe (Some task,(Sm.sm_master true :: device_config)) _type sm_config) @@ -779,7 +779,7 @@ module SMAPIv1 = struct (fun __context -> (* This call 'operates' on vdi2 *) let vdi1 = find_vdi ~__context sr vdi1 |> fst in - for_vdi ~dbg ~sr ~vdi:vdi2 "VDI.activate" + for_vdi ~dbg ~sr ~vdi:vdi2 "VDI.compose" (fun device_config _type sr self -> Sm.vdi_compose device_config _type sr vdi1 self ) @@ -811,7 +811,7 @@ module SMAPIv1 = struct info "VDI.get_url dbg:%s sr:%s vdi:%s" dbg sr vdi; (* XXX: PR-1255: tapdisk shouldn't hardcode xapi urls *) (* peer_ip/session_ref/vdi_ref *) - Server_helpers.exec_with_new_task "VDI.compose" ~subtask_of:(Ref.of_string dbg) + Server_helpers.exec_with_new_task "VDI.get_url" ~subtask_of:(Ref.of_string dbg) (fun __context -> let ip = Helpers.get_management_ip_addr ~__context |> Opt.unbox in let rpc = Helpers.make_rpc ~__context in From 598cf23a39c2a4424a27c9cc145df590da0061bf Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 5 May 2017 14:37:18 +0100 Subject: [PATCH 22/59] opam: update to xs-opam version Signed-off-by: Gabor Igloi --- opam | 44 +++++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/opam b/opam index c20eafe352b..f6bdd8296a8 100644 --- a/opam +++ b/opam @@ -1,39 +1,57 @@ -opam-version: "1" -maintainer: 
"dave.scott@citrix.com" +opam-version: "1.2" +maintainer: "xen-api@lists.xen.org" +authors: [ "xen-api@lists.xen.org" ] +homepage: "https://github.com/xapi-project/xen-api" +bug-reports: "https://github.com/xapi-project/xen-api/issues" +dev-repo: "https://github.com/xapi-project/xen-api.git" build: [ - ["./configure" "--disable-warn-error" "--varpatchdir" "%{prefix}%/var/patch" "--optdir" "%{lib}%/xcp" "--plugindir" "%{lib}%/xcp/plugins" "--hooksdir" "%{prefix}%/etc/hook-scripts" "--xapiconf" "%{prefix}%/etc/xapi.conf" "--libexecdir" "%{lib}%/xcp/bin" "--scriptsdir" "%{lib}%/xcp/scripts" "--sharedir" "%{share}%/xcp" "--webdir" "%{share}%/xcp/web" "--cluster-stack-root" "%{lib}%/xcp/bin/cluster-stack" "--bindir" "%{bin}%" "--sbindir" "%{bin}%" "--etcdir" "%{prefix}%/etc"] + ["./configure"] [make] - ["install" "-m" "0755" "ocaml/xapi/xapi.opt" "%{bin}%/xapi"] ] -build-test: [make "test" ] -remove: ["rm" "%{bin}%/xapi"] +install: [ + ["oasis" "setup"] + ["ocaml" "setup.ml" "-install"] +] +build-test: [make "test"] +remove: [ + ["oasis" "setup"] + ["ocaml" "setup.ml" "-uninstall"] + ["ocamlfind" "remove" "xapi"] + ["ocamlfind" "remove" "xapi-client"] + ["ocamlfind" "remove" "xapi-cli-protocol"] + ["ocamlfind" "remove" "xapi-consts"] + ["ocamlfind" "remove" "xapi-datamodel"] + ["ocamlfind" "remove" "xapi-database"] + ["ocamlfind" "remove" "xapi-types"] +] depends: [ - "ocamlfind" + "oasis" {build} + "ocamlfind" {build} "xapi-test-utils" - "xapi-idl" {>= "0.12.2"} + "xapi-idl" "xapi-libs-transitional" "xen-api-client" "xapi-netdev" - "omake" "cdrom" "fd-send-recv" "xapi-forkexecd" - "libvhd" + "vhd-format" "nbd" "oclock" "ounit" "rpc" "ssl" - "xapi-stdext" {>= "0.13.0"} + "xapi-stdext" "xapi-tapctl" "xenctrl" "xenstore" "xapi-inventory" "tar-format" - "opasswd" {>= "0.9.3"} + "opasswd" "xapi-rrdd-plugin" - "pci" {>= "0.2.0"} + "pci" "sha" + "xapi-xenopsd" ] depexts: [ [["centos"] ["pam-devel"]] From 8a0a6661187a2eb604928be4dbe703a72c93c520 Mon Sep 17 00:00:00 2001 From: 
Gabor Igloi Date: Fri, 5 May 2017 14:25:26 +0100 Subject: [PATCH 23/59] Travis: Add OPAM build method and coverage Because our depext package names aren't compatible with older Ubuntus, we have to use a newer Ubuntu container for the OPAM build. Signed-off-by: Gabor Igloi --- .travis-opam-coverage.sh | 33 +++++++++++++++++++++++++++++++++ .travis-xenserver-build-env.sh | 19 +++++++++++++++++++ .travis.yml | 23 +++++++++-------------- 3 files changed, 61 insertions(+), 14 deletions(-) create mode 100644 .travis-opam-coverage.sh create mode 100644 .travis-xenserver-build-env.sh diff --git a/.travis-opam-coverage.sh b/.travis-opam-coverage.sh new file mode 100644 index 00000000000..52651fe0cd7 --- /dev/null +++ b/.travis-opam-coverage.sh @@ -0,0 +1,33 @@ +# SUMMARY: +# Builds & tests xapi with coverage in a Ubuntu 16.04 Docker container with +# OCaml 4.02.3, then uploads the coverage information to coveralls. + +set -uex + +# Currently there is no way of specifying OPAM depexts for multiple versions of +# a given disto, and our current depexts only work with Ubuntu >= 16.04, due to +# a change in packages (libsystemd-dev). Since the build environments of Travis +# are older then Ubuntu 16.04, we have to run the build in a Docker container +# with an appropriate Ubuntu version. +# We need to pass some Travis environment variables to the container to enable +# uploading to coveralls and detection of Travis CI. +docker run --rm --volume=$PWD:/mnt --workdir=/mnt \ + --env "TRAVIS=$TRAVIS" \ + --env "TRAVIS_JOB_ID=$TRAVIS_JOB_ID" \ + ocaml/opam:ubuntu-16.04_ocaml-4.02.3 \ + bash -uex -c ' +# replace the base remote with xs-opam +opam repository remove default +opam repository add xs-opam https://github.com/xapi-project/xs-opam.git + +# install the dependencies of xapi +opam pin add --no-action xapi . 
+opam depext --yes xapi +opam install --deps-only xapi + +# build and test xapi with coverage, then submit the coverage information to coveralls +sudo apt-get install --yes wget +wget https://raw.githubusercontent.com/simonjbeaumont/ocaml-travis-coveralls/master/travis-coveralls.sh +COV_CONF="./configure" bash -ex travis-coveralls.sh +' + diff --git a/.travis-xenserver-build-env.sh b/.travis-xenserver-build-env.sh new file mode 100644 index 00000000000..b9a9b94c97d --- /dev/null +++ b/.travis-xenserver-build-env.sh @@ -0,0 +1,19 @@ +# SUMMARY: +# Builds and tests xapi using xenserver-build-env, which installs the +# dependencies as RPMs. + +set -uex + +wget https://raw.githubusercontent.com/xenserver/xenserver-build-env/master/utils/travis-build-repo.sh + +# only run deploy.sh when the build succeeds +env \ + CONTAINER_NAME=build-env \ + OCAMLRUNPARAM=b \ + REPO_PACKAGE_NAME=xapi \ + REPO_CONFIGURE_CMD=./configure \ + REPO_BUILD_CMD=make \ + REPO_TEST_CMD='make test' \ + REPO_DOC_CMD='make doc-json' \ + bash travis-build-repo.sh && \ + ( ( test $TRAVIS_PULL_REQUEST == "false" && test $TRAVIS_BRANCH == "master" && bash deploy.sh ) || true ) diff --git a/.travis.yml b/.travis.yml index 29c35f819b2..197c54fcb6a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,22 +1,17 @@ language: c +sudo: required services: docker -install: - - wget https://raw.githubusercontent.com/xenserver/xenserver-build-env/master/utils/travis-build-repo.sh -script: bash travis-build-repo.sh -after_success: - - test $TRAVIS_PULL_REQUEST == "false" && test $TRAVIS_BRANCH == "master" && bash deploy.sh -sudo: true +script: bash ./.travis-$BUILD_METHOD.sh env: global: - - CONTAINER_NAME=build-env - - OCAMLRUNPARAM=b - - REPO_PACKAGE_NAME=xapi - - REPO_CONFIGURE_CMD=./configure - - REPO_BUILD_CMD=make - - REPO_TEST_CMD='make test' - - REPO_DOC_CMD='make doc-json' + # for BUILD_METHOD=xenserver-build-env - secure: 
tokxJl2litqu/T6UUwzkLRZzlbxnbYqVG2QRKKQz3tkIXyZHQWTS2NAyH7mwDgdBq2dDVSxAUxS1jWq/vGraX7MmbVz37Pz8wjykoIfIRtQuEx+REDAvAzWSw+1LTpUf7ZcI+F2SpgJrnH87uN5AAc220UqIx8TvAtGrita+2+o= matrix: - - OCAML_VERSION=4.02 + - BUILD_METHOD=xenserver-build-env + - BUILD_METHOD=opam-coverage notifications: slack: citrix:BHYQZbI8m036ELU21gZil75Y +matrix: + fast_finish: true + allow_failures: + - env: BUILD_METHOD=opam-coverage From f93a8762bad1b064eaa8bc28942217d96eadde5e Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 8 May 2017 14:00:59 +0100 Subject: [PATCH 24/59] README: add build, coverage, LoC badges Signed-off-by: Gabor Igloi --- README.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.markdown b/README.markdown index 6f544fe0e94..f485011e326 100644 --- a/README.markdown +++ b/README.markdown @@ -1,6 +1,10 @@ Xapi Project's XenAPI Management Toolstack ========================================== +[![Build Status](https://travis-ci.org/xapi-project/xen-api.svg?branch=master)](https://travis-ci.org/xapi-project/xen-api) +[![Coverage Status](https://coveralls.io/repos/github/xapi-project/xen-api/badge.svg?branch=master)](https://coveralls.io/github/xapi-project/xen-api?branch=master) +[![Lines of Code](https://tokei.rs/b1/github/xapi-project/xen-api)](https://github.com/xapi-project/xen-api) + Xen API (or xapi) is a management stack that configures and controls Xen-enabled hosts and resource pools, and co-ordinates resources within the pool. Xapi exposes the Xen API interface for many From 1315fa6ee7d82c11f79fc452e53f6202722242f3 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 5 May 2017 18:07:28 +0100 Subject: [PATCH 25/59] VDI.snapshot: check that SM has this capability It seems that this capability is reported by SM for most SR types. 
Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_vdi.ml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index c31f8d7fedf..b3c1dbfbd07 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -177,6 +177,8 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re if not Smint.(has_capability Vdi_generate_config sm_features) then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) else None + | `snapshot when not Smint.(has_capability Vdi_snapshot sm_features) -> + Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) | `snapshot when record.Db_actions.vDI_sharable -> Some (Api_errors.vdi_is_sharable, [ _ref ]) | `snapshot when reset_on_boot -> From ddf6f3b01e090721aeccb054744546085fd4cc22 Mon Sep 17 00:00:00 2001 From: Thomas Sanders Date: Wed, 10 May 2017 17:40:01 +0100 Subject: [PATCH 26/59] Remove Datamodel.last_release_schema_m{aj|in}or_vsn We have been updating these two constants with every release, but nothing has been reading them since 2009. Signed-off-by: Thomas Sanders --- ocaml/idl/datamodel.ml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 34046ee9326..ed67e704eed 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -87,10 +87,6 @@ let ely_release_schema_minor_vsn = 108 let falcon_release_schema_major_vsn = 5 let falcon_release_schema_minor_vsn = 120 -(* the schema vsn of the last release: used to determine whether we can upgrade or not.. *) -let last_release_schema_major_vsn = falcon_release_schema_major_vsn -let last_release_schema_minor_vsn = falcon_release_schema_minor_vsn - (* List of tech-preview releases. Fields in these releases are not guaranteed to be retained when * upgrading to a full release. 
*) let tech_preview_releases = [ From 6b094d342aad993c61c66adb95909b1174c1a2ca Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Thu, 11 May 2017 15:21:23 +0100 Subject: [PATCH 27/59] Record_util: Remove unused string_to_vdi_type fn Commit 363d622 removed the last usages of this function. Signed-off-by: Gabor Igloi --- ocaml/xapi/record_util.ml | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/ocaml/xapi/record_util.ml b/ocaml/xapi/record_util.ml index 6c70ac0c180..d5db5868341 100644 --- a/ocaml/xapi/record_util.ml +++ b/ocaml/xapi/record_util.ml @@ -375,19 +375,6 @@ let power_to_string h = | `ShuttingDown -> "shutting down" | `Migrating -> "migrating" -let string_to_vdi_type x = match (String.lowercase x) with - | "system" -> Some `system - | "user" -> Some `user - | "ephemeral" -> Some `ephemeral - | "suspend" -> Some `suspend - | "crashdump" -> Some `crashdump - | "ha statefile" -> Some `ha_statefile - | "metadata" -> Some `metadata - | "redo log" -> Some `redo_log - | "rrd" -> Some `rrd - | "pvs_cache" -> Some `pvs_cache - | _ -> None - let vdi_type_to_string t = match t with | `system -> "System" From 4c70ad8ded41c18a665e5e7ba13dffa17de2af1d Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 12 May 2017 14:56:05 +0100 Subject: [PATCH 28/59] Cli_operations: fix confusing indentation Signed-off-by: Gabor Igloi --- ocaml/xapi/cli_operations.ml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ocaml/xapi/cli_operations.ml b/ocaml/xapi/cli_operations.ml index 4672d0df008..6ac2d910cc8 100644 --- a/ocaml/xapi/cli_operations.ml +++ b/ocaml/xapi/cli_operations.ml @@ -4341,11 +4341,12 @@ let update_upload fd printer rpc session_id params = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) else Client.Pool.get_default_SR ~rpc ~session_id ~self:(List.hd pools) - in + in let uri = Printf.sprintf "%s%s?session_id=%s&sr_id=%s&task_id=%s" prefix Constants.import_raw_vdi_uri 
(Ref.string_of session_id) (Ref.string_of sr)(Ref.string_of task_id) in let _ = debug "trying to post patch to uri:%s" uri in - HttpPut (filename, uri) in + HttpPut (filename, uri) + in let result = track_http_operation fd rpc session_id make_command "host patch upload" in let vdi_ref = API.Legacy.From.ref_VDI "" (Xml.parse_string result) in let update_ref = From 1e9220a292bc2d47cec4361a7cefa85916dad866 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 15 May 2017 14:47:02 +0100 Subject: [PATCH 29/59] smint.ml: Remove unused all_capabilites variable This is not used at all right now, and it seems that it was never used. Signed-off-by: Gabor Igloi --- ocaml/xapi/smint.ml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/ocaml/xapi/smint.ml b/ocaml/xapi/smint.ml index 97e94f205db..86d09e839fa 100644 --- a/ocaml/xapi/smint.ml +++ b/ocaml/xapi/smint.ml @@ -45,18 +45,6 @@ type capability = type feature = capability * int64 -let all_capabilites = - [ Sr_create; Sr_delete; Sr_attach; Sr_detach; Sr_scan; Sr_probe; Sr_update; - Sr_supports_local_caching; - Sr_metadata; - Sr_trim; - Sr_stats; - Vdi_create; Vdi_delete; Vdi_attach; Vdi_detach; - Vdi_clone; Vdi_resize; Vdi_activate; Vdi_deactivate; - Vdi_update; Vdi_introduce; - Vdi_resize_online - ] - let string_to_capability_table = [ "SR_PROBE", Sr_probe; "SR_UPDATE", Sr_update; From 7efa662c32992e142e17e03167c1a280e48c4c8b Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 12 May 2017 15:48:33 +0100 Subject: [PATCH 30/59] CA-253489 xe update-upload: test if default SR is valid if no sr-uuid parameter was specified, as in this case the command falls back to the pool's default SR. Now an error will be shown to the user in case of a null/invalid default SR, and we'll fail early, instead of failing later in the import_raw_vdi handler. 
Signed-off-by: Gabor Igloi --- ocaml/xapi/cli_operations.ml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/cli_operations.ml b/ocaml/xapi/cli_operations.ml index 6ac2d910cc8..7a0c42091d7 100644 --- a/ocaml/xapi/cli_operations.ml +++ b/ocaml/xapi/cli_operations.ml @@ -4340,7 +4340,16 @@ let update_upload fd printer rpc session_id params = let sr = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) - else Client.Pool.get_default_SR ~rpc ~session_id ~self:(List.hd pools) + else begin + let sr = Client.Pool.get_default_SR ~rpc ~session_id ~self:(List.hd pools) in + let ref_is_valid = Server_helpers.exec_with_new_task + ~session_id "Checking default SR validity" + (fun __context -> Db.is_valid_ref __context sr) in + if ref_is_valid then sr + else failwith "No sr-uuid parameter was given, and the pool's default SR \ + is unspecified or invalid. Please explicitly specify the SR to use \ + in the sr-uuid parameter, or set the pool's default SR." + end in let uri = Printf.sprintf "%s%s?session_id=%s&sr_id=%s&task_id=%s" prefix Constants.import_raw_vdi_uri (Ref.string_of session_id) (Ref.string_of sr)(Ref.string_of task_id) in From b2a21c8a1d7e4db1fd6c24745839873d81adebdd Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 12 May 2017 15:55:48 +0100 Subject: [PATCH 31/59] CA-253489 xe update-upload: extend help message To explain that it falls back to the pool's default SR if the sr-uuid optional parameter is not specified. 
Signed-off-by: Gabor Igloi --- ocaml/xapi/cli_frontend.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/xapi/cli_frontend.ml b/ocaml/xapi/cli_frontend.ml index 387737890ba..a9556e33cb1 100644 --- a/ocaml/xapi/cli_frontend.ml +++ b/ocaml/xapi/cli_frontend.ml @@ -855,7 +855,7 @@ let rec cmdtable_data : (string*cmd_spec) list = { reqd=["file-name"]; optn=["sr-uuid"]; - help="Stream new update to the server."; + help="Stream new update to the server. The update will be uploaded to the SR , or, if it is not specified, to the pool's default SR."; implementation=With_fd Cli_operations.update_upload; flags=[]; }; From 7abecc42eb44ed3e00fdf1648c9d588e0a86d618 Mon Sep 17 00:00:00 2001 From: minglumlu Date: Mon, 15 May 2017 06:44:44 +0100 Subject: [PATCH 32/59] CA-205515 i18n: JA/SC: The error message about failed to join a domain is not localized. Signed-off-by: minglumlu --- ocaml/idl/datamodel.ml | 8 +++ ocaml/xapi-consts/api_errors.ml | 5 +- ocaml/xapi/auth_signature.ml | 3 +- ocaml/xapi/extauth_plugin_ADpbis.ml | 36 ++++++++++++-- ocaml/xapi/suite.ml | 1 + ocaml/xapi/test_extauth_plugin_ADpbis.ml | 63 ++++++++++++++++++++++++ 6 files changed, 111 insertions(+), 5 deletions(-) create mode 100644 ocaml/xapi/test_extauth_plugin_ADpbis.ml diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index ed67e704eed..74f2cb5ddd0 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -846,6 +846,10 @@ let _ = ~doc:"The host failed to enable external authentication." (); error Api_errors.auth_enable_failed_unavailable ["message"] ~doc:"The host failed to enable external authentication." (); + error Api_errors.auth_enable_failed_invalid_ou ["message"] + ~doc:"The host failed to enable external authentication." (); + error Api_errors.auth_enable_failed_invalid_account ["message"] + ~doc:"The host failed to enable external authentication." 
(); error Api_errors.auth_disable_failed ["message"] ~doc:"The host failed to disable external authentication." (); error Api_errors.auth_disable_failed_wrong_credentials ["message"] @@ -899,6 +903,10 @@ let _ = ~doc:"The pool failed to enable external authentication." (); error Api_errors.pool_auth_enable_failed_duplicate_hostname ["host";"message"] ~doc:"The pool failed to enable external authentication." (); + error Api_errors.pool_auth_enable_failed_invalid_ou ["host";"message"] + ~doc:"The pool failed to enable external authentication." (); + error Api_errors.pool_auth_enable_failed_invalid_account ["host";"message"] + ~doc:"The pool failed to enable external authentication." (); error Api_errors.pool_auth_disable_failed ["host";"message"] ~doc:"The pool failed to disable the external authentication of at least one host." (); error Api_errors.pool_auth_disable_failed_wrong_credentials ["host";"message"] diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml index f540b94a1bc..380267634e4 100644 --- a/ocaml/xapi-consts/api_errors.ml +++ b/ocaml/xapi-consts/api_errors.ml @@ -463,12 +463,14 @@ let auth_suffix_permission_denied = "_PERMISSION_DENIED" let auth_suffix_domain_lookup_failed = "_DOMAIN_LOOKUP_FAILED" let auth_suffix_unavailable = "_UNAVAILABLE" let auth_suffix_invalid_ou = "_INVALID_OU" +let auth_suffix_invalid_account = "_INVALID_ACCOUNT" let auth_enable_failed = "AUTH_ENABLE_FAILED" let auth_enable_failed_wrong_credentials = auth_enable_failed^auth_suffix_wrong_credentials let auth_enable_failed_permission_denied = auth_enable_failed^auth_suffix_permission_denied let auth_enable_failed_domain_lookup_failed = auth_enable_failed^auth_suffix_domain_lookup_failed let auth_enable_failed_unavailable = auth_enable_failed^auth_suffix_unavailable let auth_enable_failed_invalid_ou = auth_enable_failed^auth_suffix_invalid_ou +let auth_enable_failed_invalid_account = auth_enable_failed^auth_suffix_invalid_account let auth_disable_failed = 
"AUTH_DISABLE_FAILED" let auth_disable_failed_wrong_credentials = auth_disable_failed^auth_suffix_wrong_credentials let auth_disable_failed_permission_denied = auth_disable_failed^auth_suffix_permission_denied @@ -479,7 +481,8 @@ let pool_auth_enable_failed_wrong_credentials = pool_auth_enable_failed^auth_suf let pool_auth_enable_failed_permission_denied = pool_auth_enable_failed^auth_suffix_permission_denied let pool_auth_enable_failed_domain_lookup_failed = pool_auth_enable_failed^auth_suffix_domain_lookup_failed let pool_auth_enable_failed_unavailable = pool_auth_enable_failed^auth_suffix_unavailable -let pool_auth_enable_failed_unavailable = pool_auth_enable_failed^auth_suffix_invalid_ou +let pool_auth_enable_failed_invalid_ou = pool_auth_enable_failed^auth_suffix_invalid_ou +let pool_auth_enable_failed_invalid_account = pool_auth_enable_failed^auth_suffix_invalid_account let pool_auth_enable_failed_duplicate_hostname = pool_auth_enable_failed^"_DUPLICATE_HOSTNAME" let pool_auth_disable_failed = pool_auth_prefix^auth_disable_failed let pool_auth_disable_failed_wrong_credentials = pool_auth_disable_failed^auth_suffix_wrong_credentials diff --git a/ocaml/xapi/auth_signature.ml b/ocaml/xapi/auth_signature.ml index 28999179873..8c98f1d95b8 100644 --- a/ocaml/xapi/auth_signature.ml +++ b/ocaml/xapi/auth_signature.ml @@ -22,7 +22,7 @@ *) exception Auth_failure of string -type auth_service_error_tag = E_GENERIC|E_LOOKUP|E_DENIED|E_CREDENTIALS|E_UNAVAILABLE|E_INVALID_OU +type auth_service_error_tag = E_GENERIC|E_LOOKUP|E_DENIED|E_CREDENTIALS|E_UNAVAILABLE|E_INVALID_OU|E_INVALID_ACCOUNT exception Auth_service_error of auth_service_error_tag * string exception Subject_cannot_be_resolved @@ -34,6 +34,7 @@ let suffix_of_tag errtag = | E_CREDENTIALS -> Api_errors.auth_suffix_wrong_credentials | E_UNAVAILABLE -> Api_errors.auth_suffix_unavailable | E_INVALID_OU -> Api_errors.auth_suffix_invalid_ou + | E_INVALID_ACCOUNT -> Api_errors.auth_suffix_invalid_account (* required 
fields in subject.other_config *) let subject_information_field_subject_name = "subject-name" diff --git a/ocaml/xapi/extauth_plugin_ADpbis.ml b/ocaml/xapi/extauth_plugin_ADpbis.ml index 7f02408b1f8..d1ea8bd7297 100644 --- a/ocaml/xapi/extauth_plugin_ADpbis.ml +++ b/ocaml/xapi/extauth_plugin_ADpbis.ml @@ -18,6 +18,36 @@ module D = Debug.Make(struct let name="extauth_plugin_ADpbis" end) open D +let match_error_tag (lines:string list) = + let err_catch_list = + [ "DNS_ERROR_BAD_PACKET", Auth_signature.E_LOOKUP; + "LW_ERROR_PASSWORD_MISMATCH", Auth_signature.E_CREDENTIALS; + "LW_ERROR_INVALID_ACCOUNT", Auth_signature.E_INVALID_ACCOUNT; + "LW_ERROR_ACCESS_DENIED", Auth_signature.E_DENIED; + "LW_ERROR_DOMAIN_IS_OFFLINE", Auth_signature.E_UNAVAILABLE; + "LW_ERROR_INVALID_OU", Auth_signature.E_INVALID_OU; + (* More errors to be caught here *) + ] + in + let split_to_words = fun str -> + let open Stdext.Xstringext in + let seps = ['('; ')'; ' '; '\t'; '.'] in + String.split_f (fun s -> List.exists (fun sep -> sep = s) seps) str + in + let rec has_err lines err_pattern = + match lines with + | [] -> false + | line :: rest -> + try + ignore(List.find (fun w -> w = err_pattern) (split_to_words line)); + true + with Not_found -> has_err rest err_pattern + in + try + let (_, errtag) = List.find (fun (err_pattern, _) -> has_err lines err_pattern) err_catch_list in + errtag + with Not_found -> Auth_signature.E_GENERIC + module AuthADlw : Auth_signature.AUTH_MODULE = struct @@ -57,9 +87,9 @@ struct error "execute %s exited with code %d [stdout = '%s'; stderr = '%s']" debug_cmd n stdout stderr; let lines = List.filter (fun l-> String.length l > 0) (splitlines (stdout ^ stderr)) in let errmsg = List.hd (List.rev lines) in - raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) - | e -> - error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); + let errtag = match_error_tag lines in + raise (Auth_signature.Auth_service_error (errtag, errmsg)) + | e 
-> error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) let pbis_config (name:string) (value:string) = diff --git a/ocaml/xapi/suite.ml b/ocaml/xapi/suite.ml index 0bfe8cb2aa8..4ada58f8d64 100644 --- a/ocaml/xapi/suite.ml +++ b/ocaml/xapi/suite.ml @@ -59,6 +59,7 @@ let base_suite = Test_pvs_cache_storage.test; Test_sdn_controller.test; Test_event.test; + Test_extauth_plugin_ADpbis.test; ] let handlers = [ diff --git a/ocaml/xapi/test_extauth_plugin_ADpbis.ml b/ocaml/xapi/test_extauth_plugin_ADpbis.ml new file mode 100644 index 00000000000..293306d16dc --- /dev/null +++ b/ocaml/xapi/test_extauth_plugin_ADpbis.ml @@ -0,0 +1,63 @@ +(* + * Copyright (C) Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ *) + +open OUnit +open Test_highlevel + +module PbisAuthErrorsCatch= Generic.Make(struct + module Io = struct + type input_t = string list + type output_t = Auth_signature.auth_service_error_tag + + let string_of_input_t = Test_printers.(list string) + let string_of_output_t output = + match output with + | Auth_signature.E_GENERIC -> "E_GENERIC" + | Auth_signature.E_LOOKUP -> "E_LOOKUP" + | Auth_signature.E_DENIED -> "E_DENIED" + | Auth_signature.E_CREDENTIALS -> "E_CREDENTIALS" + | Auth_signature.E_UNAVAILABLE -> "E_UNAVAILABLE" + | Auth_signature.E_INVALID_OU -> "E_INVALID_OU" + | Auth_signature.E_INVALID_ACCOUNT -> "E_INVALID_ACCOUNT" + + end + + let transform = Extauth_plugin_ADpbis.match_error_tag + + let tests = [ + [], Auth_signature.E_GENERIC; + [""; ""], Auth_signature.E_GENERIC; + [""; "some words"], Auth_signature.E_GENERIC; + [""; "DNS_ERROR_BAD_PACKET"], Auth_signature.E_LOOKUP; + [""; "LW_ERROR_PASSWORD_MISMATCH"], Auth_signature.E_CREDENTIALS; + [""; "LW_ERROR_INVALID_ACCOUNT"], Auth_signature.E_INVALID_ACCOUNT; + [""; "LW_ERROR_ACCESS_DENIED"], Auth_signature.E_DENIED; + [""; "LW_ERROR_DOMAIN_IS_OFFLINE"], Auth_signature.E_UNAVAILABLE; + [""; "LW_ERROR_INVALID_OU"], Auth_signature.E_INVALID_OU; + + [""; "prefixDNS_ERROR_BAD_PACKETsuffix"], Auth_signature.E_GENERIC; + [""; "prefix_DNS_ERROR_BAD_PACKET_suffix"], Auth_signature.E_GENERIC; + [""; "prefix(DNS_ERROR_BAD_PACKET)suffix"], Auth_signature.E_LOOKUP; + [""; "prefix.DNS_ERROR_BAD_PACKET.suffix"], Auth_signature.E_LOOKUP; + [""; "prefix DNS_ERROR_BAD_PACKET suffix"], Auth_signature.E_LOOKUP; + [""; "prefix\tDNS_ERROR_BAD_PACKET\tsuffix"], Auth_signature.E_LOOKUP; + ] + end) + +let test = + "test_extauth_ADpbis" >::: + [ + "test_pbis_auth_errors_catch" >::: PbisAuthErrorsCatch.tests; + ] + From c591e675bb9c7139564b63ea5fbce495c5da06ed Mon Sep 17 00:00:00 2001 From: Marcello Seri Date: Tue, 16 May 2017 20:16:58 +0100 Subject: [PATCH 33/59] Update xapi_vm_helpers and xapi_xenops for ocaml 4.03+ 
These changes are needed to silence `Warning 52`. Signed-off-by: Marcello Seri --- ocaml/xapi/xapi_vm_helpers.ml | 11 ++++++++--- ocaml/xapi/xapi_xenops.ml | 8 +++----- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index 3ea36ef7919..afff11174f7 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -647,6 +647,10 @@ let choose_host_uses_wlb ~__context = ~self:(Helpers.get_pool ~__context))) +(* This is a stub used in the pattern_matching below to silence a + * warning in the newer ocaml compilers *) +exception Float_of_string_failure + (** Given a virtual machine, returns a host it can boot on, giving *) (** priority to an affinity host if one is present. WARNING: called *) (** while holding the global lock from the message forwarding layer. *) @@ -662,8 +666,9 @@ let choose_host_for_vm ~__context ~vm ~snapshot = | ["WLB"; "0.0"; rec_id; zero_reason] -> filter_and_convert tl | ["WLB"; stars; rec_id] -> - (h, float_of_string stars, rec_id) - :: filter_and_convert tl + let st = try float_of_string stars with Failure _ -> raise Float_of_string_failure + in + (h, st, rec_id) :: filter_and_convert tl | _ -> filter_and_convert tl end | [] -> [] @@ -728,7 +733,7 @@ let choose_host_for_vm ~__context ~vm ~snapshot = with _ -> () end; choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - | Failure "float_of_string" -> + | Float_of_string_failure -> debug "Star ratings from wlb could not be parsed to floats. 
\ Using original algorithm"; choose_host_for_vm_no_wlb ~__context ~vm ~snapshot diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index 05f6d61fe48..7bcfdf05c0e 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -435,7 +435,7 @@ module MD = struct let rate = Int64.of_string (List.assoc "kbps" qos_params) in Some (rate, timeslice) with - | Failure "int_of_string" -> + | Failure _ (* int_of_string *) -> log_qos_failure "parameter \"kbps\" not an integer"; None | Not_found -> log_qos_failure "necessary parameter \"kbps\" not found"; None @@ -604,8 +604,7 @@ module MD = struct } with | Not_found -> failwith "Intel GVT-g settings not specified" - | Failure "int_of_string" -> - failwith "Intel GVT-g settings invalid" + | Failure _ (* int_of_string *)-> failwith "Intel GVT-g settings invalid" let of_mxgpu_vgpu ~__context vm vgpu = let open Vgpu in @@ -630,8 +629,7 @@ module MD = struct } with | Not_found -> failwith "AMD MxGPU settings not specified" - | Failure "int_of_string" -> - failwith "AMD MxGPU settings invalid" + | Failure _ (* int_of_string *) -> failwith "AMD MxGPU settings invalid" let vgpus_of_vm ~__context (vmref, vm) = let open Vgpu in From 57bc48743b55d1847ecca430fab7eb0aef81d04e Mon Sep 17 00:00:00 2001 From: Marcello Seri Date: Wed, 17 May 2017 11:29:36 +0100 Subject: [PATCH 34/59] Update cli_operations to work with 4.03+ This only adds a code-smell warning in the code and silences Warning 52 for the specific file. Fixing the warning there requires a change that is larger than the scope of this fixes. 
Signed-off-by: Marcello Seri --- _tags | 3 +++ ocaml/xapi/cli_operations.ml | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 _tags diff --git a/_tags b/_tags new file mode 100644 index 00000000000..5587ac5cca5 --- /dev/null +++ b/_tags @@ -0,0 +1,3 @@ +# OASIS_START +# OASIS_STOP +: warn(-52) diff --git a/ocaml/xapi/cli_operations.ml b/ocaml/xapi/cli_operations.ml index 4672d0df008..cb05c1bbcc1 100644 --- a/ocaml/xapi/cli_operations.ml +++ b/ocaml/xapi/cli_operations.ml @@ -678,6 +678,7 @@ let make_param_funs getall getallrecs getbyuuid record class_name def_filters de try set v with + (* XXX: -- warning 52 -- this might break with new ocaml compilers *) (Failure "int_of_string") -> failwith ("Parameter "^k^" must be an integer") | (Failure "float_of_string") -> failwith ("Parameter "^k^" must be a floating-point number") | (Invalid_argument "bool_of_string") -> failwith ("Parameter "^k^" must be a boolean (true or false)") @@ -1839,7 +1840,7 @@ let select_vms ?(include_control_vms = false) ?(include_template_vms = false) rp let params = if not include_template_vms then ("is-a-template" , "false") :: params else params in let vm_name_or_ref = try Some ( (* Escape every quote character *) - List.assoc "vm" params |> Stdext.Xstringext.String.replace "\"" "\\\"" + List.assoc "vm" params |> String.replace "\"" "\\\"" ) with _ -> None in let params, where_clause = match vm_name_or_ref with | None -> params, "true" From 4f30998a38c457e5d48c093e64c811bd0e607ef9 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Thu, 18 May 2017 09:41:09 +0100 Subject: [PATCH 35/59] Travis: fix OPAM coverage job By adding a "sudo apt-get update" step. 
Signed-off-by: Gabor Igloi --- .travis-opam-coverage.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis-opam-coverage.sh b/.travis-opam-coverage.sh index 52651fe0cd7..a0e09a4ebca 100644 --- a/.travis-opam-coverage.sh +++ b/.travis-opam-coverage.sh @@ -16,6 +16,8 @@ docker run --rm --volume=$PWD:/mnt --workdir=/mnt \ --env "TRAVIS_JOB_ID=$TRAVIS_JOB_ID" \ ocaml/opam:ubuntu-16.04_ocaml-4.02.3 \ bash -uex -c ' +sudo apt-get update + # replace the base remote with xs-opam opam repository remove default opam repository add xs-opam https://github.com/xapi-project/xs-opam.git From ddbd96766837f2a67cf4e344bf3ed5088f4bc1ff Mon Sep 17 00:00:00 2001 From: Thomas Sanders Date: Thu, 18 May 2017 12:20:26 +0100 Subject: [PATCH 36/59] Refactor Xapi_vdi.check_operation_error This is in preparation for CP-22009: changes needed in this area to support changed block tracking. Signed-off-by: Thomas Sanders --- ocaml/xapi/xapi_vdi.ml | 58 ++++++++++++++++++++++-------------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index c31f8d7fedf..75815d4874d 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -25,6 +25,30 @@ open Printf (**************************************************************************************) (* current/allowed operations checking *) +let check_sm_feature_error (op:API.vdi_operations) sm_features sr = + let required_sm_feature = Smint.(match op with + | `forget + | `snapshot + | `copy + | `scan + | `force_unlock + | `blocked + -> None + | `destroy -> Some Vdi_delete + | `resize -> Some Vdi_resize + | `update -> Some Vdi_update + | `resize_online -> Some Vdi_resize_online + | `generate_config -> Some Vdi_generate_config + | `clone -> Some Vdi_clone + | `mirror -> Some Vdi_mirror + ) in + match required_sm_feature with + | None -> None + | Some feature -> + if Smint.(has_capability feature sm_features) + then None + else Some (Api_errors.sr_operation_not_supported, 
[Ref.string_of sr]) + (** Checks to see if an operation is valid in this state. Returns Some exception if not and None if everything is ok. *) let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_records=[]) ha_enabled record _ref' op = @@ -113,8 +137,6 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re (* NB RO vs RW sharing checks are done in xapi_vbd.ml *) - let sm_features = Xapi_sr_operations.features_of_sr_internal ~__context ~_type:sr_type in - let blocked_by_attach = if operation_can_be_performed_live then false @@ -128,6 +150,11 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re then Some (Api_errors.vdi_in_use,[_ref; (Record_util.vdi_operation_to_string op)]) else if my_has_current_operation_vbd_records <> [] then Some (Api_errors.other_operation_in_progress, [ "VDI"; _ref ]) + else + let sm_features = Xapi_sr_operations.features_of_sr_internal ~__context ~_type:sr_type in + let sm_feature_error = check_sm_feature_error op sm_features sr in + if sm_feature_error <> None + then sm_feature_error else ( match op with | `forget -> @@ -151,31 +178,14 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re then Some (Api_errors.ha_enable_in_progress, []) else if List.mem record.Db_actions.vDI_type [`ha_statefile; `metadata ] && Xapi_pool_helpers.ha_disable_in_progress ~__context then Some (Api_errors.ha_disable_in_progress, []) - else - if not Smint.(has_capability Vdi_delete sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) else None | `resize -> if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] then Some (Api_errors.ha_is_enabled, []) - else - if not Smint.(has_capability Vdi_resize sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `update -> - if not Smint.(has_capability Vdi_update sm_features) - then Some 
(Api_errors.sr_operation_not_supported, [Ref.string_of sr]) else None | `resize_online -> if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] then Some (Api_errors.ha_is_enabled, []) - else - if not Smint.(has_capability Vdi_resize_online sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `generate_config -> - if not Smint.(has_capability Vdi_generate_config sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) else None | `snapshot when record.Db_actions.vDI_sharable -> Some (Api_errors.vdi_is_sharable, [ _ref ]) @@ -189,15 +199,7 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re if List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] then Some (Api_errors.operation_not_allowed, ["VDI containing HA statefile or redo log cannot be copied (check the VDI's allowed operations)."]) else None - | `clone -> - if not Smint.(has_capability Vdi_clone sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | `mirror -> - if not Smint.(has_capability Vdi_mirror sm_features) - then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - else None - | _ -> None + | `mirror | `clone | `generate_config | `scan | `force_unlock | `blocked | `update -> None ) let assert_operation_valid ~__context ~self ~(op:API.vdi_operations) = From 80e38c4355ec096cdd4b445dc6cd9aceb819eae1 Mon Sep 17 00:00:00 2001 From: Thomas Sanders Date: Thu, 18 May 2017 17:57:06 +0100 Subject: [PATCH 37/59] Remove unused function Xapi_vdi.cancel_task Signed-off-by: Thomas Sanders --- ocaml/xapi/xapi_vdi.ml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index 75815d4874d..3eb84070acc 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -225,15 +225,6 @@ let update_allowed_operations_internal ~__context ~self ~sr_records ~pbd_records 
let update_allowed_operations ~__context ~self : unit = update_allowed_operations_internal ~__context ~self ~sr_records:[] ~pbd_records:[] ~vbd_records:[] -(** Someone is cancelling a task so remove it from the current_operations *) -let cancel_task ~__context ~self ~task_id = - let all = List.map fst (Db.VDI.get_current_operations ~__context ~self) in - if List.mem task_id all then - begin - Db.VDI.remove_from_current_operations ~__context ~self ~key:task_id; - update_allowed_operations ~__context ~self - end - let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = let ops = Db.VDI.get_current_operations ~__context ~self in let set = (fun value -> Db.VDI.set_current_operations ~__context ~self ~value) in From d2fc72dbf82b12388aa837f49dc9d3c34a46a972 Mon Sep 17 00:00:00 2001 From: Thomas Sanders Date: Thu, 18 May 2017 17:32:32 +0100 Subject: [PATCH 38/59] New file Xapi_vdi.mli This contains declarations for only the externally used functions. Comments have been copied from the .ml file where appropriate. Inferred polymorphic variant types have been replaced manually with API.vdi_operations and API.vdi_type where applicable. Signed-off-by: Thomas Sanders --- ocaml/xapi/xapi_vdi.ml | 10 +- ocaml/xapi/xapi_vdi.mli | 204 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 209 insertions(+), 5 deletions(-) create mode 100644 ocaml/xapi/xapi_vdi.mli diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index 3eb84070acc..32c9a1eb4c1 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -1,5 +1,5 @@ (* - * Copyright (C) 2006-2009 Citrix Systems Inc. + * Copyright (C) 2006-2017 Citrix Systems Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(** Module that defines API functions for VDI objects +(* Module that defines API functions for VDI objects * @group XenAPI functions *) @@ -49,8 +49,8 @@ let check_sm_feature_error (op:API.vdi_operations) sm_features sr = then None else Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) -(** Checks to see if an operation is valid in this state. Returns Some exception - if not and None if everything is ok. *) +(* Checks to see if an operation is valid in this state. Returns Some exception + if not and None if everything is ok. *) let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_records=[]) ha_enabled record _ref' op = let _ref = Ref.string_of _ref' in let current_ops = record.Db_actions.vDI_current_operations in @@ -232,7 +232,7 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = (**************************************************************************************) -(** Helper function to create a new VDI record with all fields copied from +(* Helper function to create a new VDI record with all fields copied from an original, except ref and *_operations, UUID and others supplied as optional arguments. If a new UUID is not supplied, a fresh one is generated. storage_lock defaults to false. diff --git a/ocaml/xapi/xapi_vdi.mli b/ocaml/xapi/xapi_vdi.mli new file mode 100644 index 00000000000..127008fa3ea --- /dev/null +++ b/ocaml/xapi/xapi_vdi.mli @@ -0,0 +1,204 @@ +(* + * Copyright (C) 2017 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU Lesser General Public License for more details. + *) + +(** Module that defines API functions for VDI objects + * @group XenAPI functions +*) + +(** Checks to see if an operation is valid in this state. Returns Some exception + if not and None if everything is ok. *) +(* Exposed here only for use by Test_vdi_allowed_operations, this + declaration also serves to annotate the (op:API.vdi_operations) + parameter. *) +val check_operation_error : + __context:Context.t -> + ?sr_records:'a list -> + ?pbd_records:('b API.Ref.t * API.pBD_t) list -> + ?vbd_records:('c * Db_actions.vBD_t) list -> + bool -> + Db_actions.vDI_t -> + API.ref_VDI -> + API.vdi_operations -> + (string * string list) option + +val assert_operation_valid : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> op:API.vdi_operations -> unit + +val update_allowed_operations_internal : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> + sr_records:'a list -> + pbd_records:('b API.Ref.t * API.pBD_t) list -> + vbd_records:('c * Db_actions.vBD_t) list -> unit + +val update_allowed_operations : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> unit + +val cancel_tasks : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> + all_tasks_in_db:'a Ref.t list -> task_ids:string list -> unit + +val create : + __context:Context.t -> + name_label:string -> + name_description:string -> + sR:[ `SR ] API.Ref.t -> + virtual_size:int64 -> + _type:API.vdi_type -> + sharable:bool -> + read_only:bool -> + other_config:(string * string) list -> + xenstore_data:(string * string) list -> + sm_config:(string * string) list -> tags:string list -> [ `VDI ] API.Ref.t + +val pool_introduce : + __context:Context.t -> + uuid:string -> + name_label:string -> + name_description:string -> + sR:[ `SR ] API.Ref.t -> + _type:API.vdi_type -> + sharable:bool -> + read_only:bool -> + other_config:(string * string) list -> + location:string -> + xenstore_data:(string * string) list -> + sm_config:(string * string) list -> 
+ managed:bool -> + virtual_size:int64 -> + physical_utilisation:int64 -> + metadata_of_pool:[ `pool ] API.Ref.t -> + is_a_snapshot:bool -> + snapshot_time:API.Date.iso8601 -> + snapshot_of:[ `VDI ] API.Ref.t -> [ `VDI ] Ref.t + +val db_introduce : + __context:Context.t -> + uuid:string -> + name_label:string -> + name_description:string -> + sR:[ `SR ] API.Ref.t -> + _type:API.vdi_type -> + sharable:bool -> + read_only:bool -> + other_config:(string * string) list -> + location:string -> + xenstore_data:(string * string) list -> + sm_config:(string * string) list -> + managed:bool -> + virtual_size:int64 -> + physical_utilisation:int64 -> + metadata_of_pool:[ `pool ] API.Ref.t -> + is_a_snapshot:bool -> + snapshot_time:API.Date.iso8601 -> + snapshot_of:[ `VDI ] API.Ref.t -> [ `VDI ] Ref.t + +val db_forget : __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> unit + +val introduce : + __context:Context.t -> + uuid:string -> + name_label:string -> + name_description:string -> + sR:[ `SR ] API.Ref.t -> + _type:API.vdi_type -> + sharable:bool -> + read_only:'a -> + other_config:(string * string) list -> + location:string -> + xenstore_data:(string * string) list -> + sm_config:(string * string) list -> + managed:'b -> + virtual_size:'c -> + physical_utilisation:'d -> + metadata_of_pool:'e -> + is_a_snapshot:'f -> + snapshot_time:'g -> snapshot_of:'h -> [ `VDI ] API.Ref.t +val update : __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> unit +val forget : __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> unit + +(** driver_params is the storage-backend-specific parameters that are used to drive the + snapshot operation (e.g. 
vmhint for NetAPP) +*) +val snapshot_and_clone : + (dbg:string -> + sr:string -> + vdi_info:Storage_interface.vdi_info -> Storage_interface.vdi_info) -> + __context:Context.t -> + vdi:[ `VDI ] API.Ref.t -> + driver_params:(string * string) list -> [ `VDI ] API.Ref.t + +val snapshot : + __context:Context.t -> + vdi:[ `VDI ] API.Ref.t -> + driver_params:(string * string) list -> [ `VDI ] API.Ref.t +val destroy : __context:Context.t -> self:[ `VDI ] API.Ref.t -> unit +val resize_online : + __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> size:int64 -> unit +val resize : + __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> size:int64 -> unit +val generate_config : + __context:Context.t -> host:'a -> vdi:[ `VDI ] API.Ref.t -> string +val clone : + __context:Context.t -> + vdi:[ `VDI ] API.Ref.t -> + driver_params:(string * string) list -> [ `VDI ] API.Ref.t +val copy : + __context:Context.t -> + vdi:[ `VDI ] API.Ref.t -> + sr:'a Ref.t -> + base_vdi:API.ref_VDI -> + into_vdi:[ `VDI ] API.Ref.t Client.Id.t -> [ `VDI ] API.Ref.t Client.Id.t +val force_unlock : __context:'a -> vdi:'b -> 'c +val set_sharable : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit +val set_managed : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit +val set_read_only : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit +val set_missing : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit +val set_virtual_size : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:int64 -> unit +val set_physical_utilisation : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:int64 -> unit +val set_is_a_snapshot : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit +val set_snapshot_of : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> value:[ `VDI ] API.Ref.t -> unit +val set_snapshot_time : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> value:API.Date.iso8601 -> unit +val 
set_metadata_of_pool : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> value:[ `pool ] API.Ref.t -> unit +val set_on_boot : + __context:Context.t -> + self:[ `VDI ] API.Ref.t -> value:[< `persist | `reset > `persist ] -> unit +val set_allow_caching : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit +val set_name_label : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:string -> unit +val set_name_description : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:string -> unit +val checksum : __context:Context.t -> self:API.ref_VDI -> string + +(** Open a foreign database on a VDI *) +val open_database : + __context:Context.t -> self:[ `VDI ] API.Ref.t -> API.ref_session + +val read_database_pool_uuid : __context:'a -> self:API.ref_VDI -> string From 9d3a28bb6e1d47716c89b5c92243c6c81d896059 Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 12 May 2017 16:49:09 +0100 Subject: [PATCH 39/59] Replaced hardcoded values with variables. Signed-off-by: Konstantina Chremmou --- ocaml/idl/datamodel.ml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 74f2cb5ddd0..f69f51f0997 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -303,37 +303,37 @@ let cowley_release = } let midnight_ride_release = - { internal=get_product_releases "midnight-ride" + { internal=get_product_releases rel_midnight_ride ; opensource=get_oss_releases None ; internal_deprecated_since=None } let george_release = - { internal=get_product_releases "george" + { internal=get_product_releases rel_george ; opensource=get_oss_releases None ; internal_deprecated_since=None } let orlando_release = - { internal=get_product_releases "orlando" + { internal=get_product_releases rel_orlando ; opensource=get_oss_releases None ; internal_deprecated_since=None } let miami_symc_release = - { internal=get_product_releases "symc" + { internal=get_product_releases rel_symc 
; opensource=get_oss_releases None ; internal_deprecated_since=None } let miami_release = - { internal=get_product_releases "miami" + { internal=get_product_releases rel_miami ; opensource=get_oss_releases None ; internal_deprecated_since=None } let rio_release = - { internal=get_product_releases "rio" + { internal=get_product_releases rel_rio ; opensource=get_oss_releases (Some "3.0.3") ; internal_deprecated_since=None } From 47921361862350864c158d20bc38aebc78bc5be6 Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 19 May 2017 13:21:40 +0100 Subject: [PATCH 40/59] Removed release dundee_plus as it was never released. Signed-off-by: Konstantina Chremmou --- ocaml/idl/datamodel.ml | 18 +++++++++--------- ocaml/idl/datamodel_types.ml | 2 -- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index f69f51f0997..7966dd8ffc5 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -236,8 +236,8 @@ let falcon_release = ; internal_deprecated_since=None } -let dundee_plus_release = - { internal = get_product_releases rel_dundee_plus +let ely_release = + { internal = get_product_releases rel_ely ; opensource=get_oss_releases None ; internal_deprecated_since=None } @@ -3219,7 +3219,7 @@ let host_call_plugin = call let host_has_extension = call ~name:"has_extension" - ~in_product_since:rel_dundee_plus + ~in_product_since:rel_ely ~doc:"Return true if the extension is available on the host" ~params:[Ref _host, "host", "The host"; String, "name", "The name of the API call";] @@ -3229,7 +3229,7 @@ let host_has_extension = call let host_call_extension = call ~name:"call_extension" - ~in_product_since:rel_dundee_plus + ~in_product_since:rel_ely ~custom_marshaller:true ~doc:"Call a XenAPI extension on this host" ~params:[Ref _host, "host", "The host"; @@ -4555,7 +4555,7 @@ let host_emergency_ha_disable = call ~flags:[`Session] ~in_oss_since:None ~in_product_since:rel_orlando ~versioned_params: - 
[{param_type=Bool; param_name="soft"; param_doc="Disable HA temporarily, revert upon host reboot or further changes, idempotent"; param_release=dundee_plus_release; param_default=Some(VBool false)}; + [{param_type=Bool; param_name="soft"; param_doc="Disable HA temporarily, revert upon host reboot or further changes, idempotent"; param_release=ely_release; param_default=Some(VBool false)}; ] ~doc:"This call disables HA on the local host. This should only be used with extreme care." ~allowed_roles:_R_POOL_OP @@ -5064,7 +5064,7 @@ let host = field ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "guest_VCPUs_params" "VCPUs params to apply to all resident guests"; field ~qualifier:RW ~in_product_since:rel_cream ~default_value:(Some (VEnum "enabled")) ~ty:host_display "display" "indicates whether the host is configured to output its console to a physical display device"; field ~qualifier:DynamicRO ~in_product_since:rel_cream ~default_value:(Some (VSet [VInt 0L])) ~ty:(Set (Int)) "virtual_hardware_platform_versions" "The set of versions of the virtual hardware platform that the host can offer to its guests"; - field ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) ~in_product_since:rel_dundee_plus ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)"; + field ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) ~in_product_since:rel_ely ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)"; field ~qualifier:DynamicRO ~lifecycle:[Published, rel_ely, ""] ~ty:(Set (Ref _pool_update)) ~ignore_foreign_key:true "updates_requiring_reboot" "List of updates which require reboot"; field ~qualifier:DynamicRO ~lifecycle:[Published, rel_falcon, ""] ~ty:(Set (Ref _feature)) "features" "List of features available on this host" ]) @@ -9256,7 +9256,7 @@ let vgpu_type = () module PVS_site = struct - let lifecycle = [Prototyped, rel_dundee_plus, ""] + let lifecycle = [Prototyped, rel_ely, ""] let 
introduce = call ~name:"introduce" @@ -9342,7 +9342,7 @@ end let pvs_site = PVS_site.obj module PVS_server = struct - let lifecycle = [Prototyped, rel_dundee_plus, ""] + let lifecycle = [Prototyped, rel_ely, ""] let introduce = call ~name:"introduce" @@ -9411,7 +9411,7 @@ end let pvs_server = PVS_server.obj module PVS_proxy = struct - let lifecycle = [Prototyped, rel_dundee_plus, ""] + let lifecycle = [Prototyped, rel_ely, ""] let status = Enum ("pvs_proxy_status", [ "stopped", "The proxy is not currently running"; diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index 41a88212dd3..61dc9fbadf8 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -54,7 +54,6 @@ let rel_creedence = "creedence" let rel_cream = "cream" let rel_indigo = "indigo" let rel_dundee = "dundee" -let rel_dundee_plus = "dundee-plus" let rel_ely = "ely" let rel_falcon = "falcon" @@ -78,7 +77,6 @@ let release_order = ; rel_cream ; rel_indigo ; rel_dundee - ; rel_dundee_plus ; rel_ely ; rel_falcon ] From fcd0232a5008a7ed3e7463b4e2d4ee0d4c4641ef Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 19 May 2017 11:49:41 +0100 Subject: [PATCH 41/59] CA-245333: Extended the release order to include the corresponding API version and branding info. 
Thus clients and documentation generated from the API can be udpated automatically without having each time to map manually the release code name to the version or the branding, which is an error prone process Signed-off-by: Konstantina Chremmou --- ocaml/doc/jsapi.ml | 10 +- ocaml/idl/datamodel.ml | 5 +- ocaml/idl/datamodel_types.ml | 158 ++++++++++++++++++++++++----- ocaml/idl/dm_api.ml | 2 +- ocaml/idl/json_backend/gen_json.ml | 8 +- 5 files changed, 147 insertions(+), 36 deletions(-) diff --git a/ocaml/doc/jsapi.ml b/ocaml/doc/jsapi.ml index b0dae63c058..978e4dd708f 100644 --- a/ocaml/doc/jsapi.ml +++ b/ocaml/doc/jsapi.ml @@ -48,14 +48,14 @@ let generate_files destdir = let changes_in_release rel = let search_obj obj = - let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) obj.obj_lifecycle in let obj_changes : changes_t = List.map (fun (transition, release, doc) -> (transition, obj.name, if doc = "" && transition = Published then obj.description else doc) ) changes in let changes_for_msg m = - let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) m.msg_lifecycle in List.map (fun (transition, release, doc) -> (transition, m.msg_name, if doc = "" && transition = Published then m.msg_doc else doc) ) changes @@ -64,7 +64,7 @@ let generate_files destdir = let msg_changes : changes_t = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in let changes_for_field f = - let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) f.lifecycle in let field_name = String.concat "_" f.full_name in List.map (fun (transition, release, doc) -> (transition, 
field_name, if doc = "" && transition = Published then f.field_description else doc) @@ -83,11 +83,11 @@ let generate_files destdir = "{'cls': '" ^ obj.name ^ "', 'obj_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t obj_changes) ^ ", 'field_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t field_changes) ^ ", 'msg_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t msg_changes) ^ "}" in let release_info = String.concat ", " (List.map search_obj objs) in - let fname = rel ^ ".json" in + let fname = (code_name_of_release rel) ^ ".json" in Stdext.Unixext.write_string_to_file (Filename.concat api_dir fname) ("release_info = [" ^ release_info ^ "]") in List.iter changes_in_release release_order; - let release_list = String.concat ", " (List.map (fun s -> "'" ^ s ^ "'") release_order) in + let release_list = String.concat ", " (List.map (fun s -> "'" ^ (code_name_of_release s) ^ "'") release_order) in Stdext.Unixext.write_string_to_file (Filename.concat api_dir "releases.json") ("releases = [" ^ release_list ^ "]") diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 7966dd8ffc5..7fe0ed395f2 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -215,8 +215,6 @@ let _R_ALL = _R_READ_ONLY let errors = Hashtbl.create 10 let messages = Hashtbl.create 10 -exception UnspecifiedRelease - let get_oss_releases in_oss_since = match in_oss_since with None -> [] @@ -227,7 +225,8 @@ let get_product_releases in_product_since = let rec go_through_release_order rs = match rs with [] -> raise UnspecifiedRelease - | x::xs -> if x=in_product_since then "closed"::x::xs else go_through_release_order xs + | x::xs when code_name_of_release x = in_product_since -> "closed"::in_product_since::(List.map code_name_of_release xs) + | x::xs -> go_through_release_order xs in go_through_release_order release_order let falcon_release = diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index 61dc9fbadf8..bd0d840e12d 100644 --- 
a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -57,37 +57,149 @@ let rel_dundee = "dundee" let rel_ely = "ely" let rel_falcon = "falcon" -let release_order = - [ rel_rio - ; rel_miami - ; rel_symc - ; rel_orlando - ; rel_orlando_update_1 - ; rel_george - ; rel_midnight_ride - ; rel_cowley - ; rel_boston - ; rel_tampa - ; rel_clearwater - ; rel_vgpu_tech_preview - ; rel_vgpu_productisation - ; rel_clearwater_felton - ; rel_clearwater_whetstone - ; rel_creedence - ; rel_cream - ; rel_indigo - ; rel_dundee - ; rel_ely - ; rel_falcon +type api_release = { + code_name: string option; + version_major: int; + version_minor: int; + branding: string; +} + +let release_order_full = [{ + code_name = Some rel_rio; + version_major = 1; + version_minor = 1; + branding = "XenServer 4.0"; + }; { + code_name = Some rel_miami; + version_major = 1; + version_minor = 2; + branding = "XenServer 4.1"; + }; { + code_name = Some rel_symc; + version_major = 1; + version_minor = 2; + branding = "XenServer 4.1.1"; + }; { + code_name = Some rel_orlando; + version_major = 1; + version_minor = 3; + branding = "XenServer 5.0"; + }; { + code_name = Some rel_orlando_update_1; + version_major = 1; + version_minor = 3; + branding = "XenServer 5.0 Update 1"; + }; { + code_name = None; + version_major = 1; + version_minor = 4; + branding = "Unreleased"; + }; { + code_name = None; + version_major = 1; + version_minor = 5; + branding = "XenServer 5.0 update 3"; + }; { + code_name = Some rel_george; + version_major = 1; + version_minor = 6; + branding = "XenServer 5.5"; + }; { + code_name = Some rel_midnight_ride; + version_major = 1; + version_minor = 7; + branding = "XenServer 5.6"; + }; { + code_name = Some rel_cowley; + version_major = 1; + version_minor = 8; + branding = "XenServer 5.6 FP1"; + }; { + code_name = Some rel_boston; + version_major = 1; + version_minor = 9; + branding = "XenServer 6.0"; + }; { + code_name = Some rel_tampa; + version_major = 1; + version_minor = 10; 
+ branding = "XenServer 6.1"; + }; { + code_name = Some rel_clearwater; + version_major = 2; + version_minor = 0; + branding = "XenServer 6.2"; + }; { + code_name = Some rel_vgpu_tech_preview; + version_major = 2; + version_minor = 0; + branding = "XenServer 6.2 SP1 Tech-Preview"; + }; { + code_name = Some rel_vgpu_productisation; + version_major = 2; + version_minor = 1; + branding = "XenServer 6.2 SP1"; + }; { + code_name = Some rel_clearwater_felton; + version_major = 2; + version_minor = 2; + branding = "XenServer 6.2 SP1 Hotfix 4"; + }; { + code_name = Some rel_clearwater_whetstone; + version_major = 2; + version_minor = 2; + branding = "XenServer 6.2 SP1 Hotfix 11"; + }; { + code_name = Some rel_creedence; + version_major = 2; + version_minor = 3; + branding = "XenServer 6.5"; + }; { + code_name = Some rel_cream; + version_major = 2; + version_minor = 4; + branding = "XenServer 6.5 SP1"; + }; { + code_name = Some rel_indigo; + version_major = 2; + version_minor = 4; + branding = "XenServer 6.5 SP1 Hotfix 31"; + }; { + code_name = Some rel_dundee; + version_major = 2; + version_minor = 5; + branding = "XenServer 7.0"; + }; { + code_name = Some rel_ely; + version_major = 2; + version_minor = 6; + branding = "XenServer 7.1"; + }; { + code_name = Some rel_falcon; + version_major = 2; + version_minor = 7; + branding = "XenServer 7.2"; + }; ] +let release_order = + List.filter (fun x -> x.code_name <> None) release_order_full + exception Unknown_release of string +exception UnspecifiedRelease + +let code_name_of_release x = + match x.code_name with + | Some r -> r + | None -> raise UnspecifiedRelease + (* ordering function on releases *) let release_leq x y = let rec posn_in_list i x l = match l with [] -> raise (Unknown_release x) - | r::rs -> if r=x then i else posn_in_list (i+1) x rs in + | r::rs when code_name_of_release r = x -> i + | r::rs-> posn_in_list (i+1) x rs in (posn_in_list 0 x release_order) <= (posn_in_list 0 y release_order) (** Types of object 
fields. Accessor functions are generated for each field automatically according to its type and qualifiers. *) diff --git a/ocaml/idl/dm_api.ml b/ocaml/idl/dm_api.ml index beadc1bd8b5..54b4e0a5fe7 100644 --- a/ocaml/idl/dm_api.ml +++ b/ocaml/idl/dm_api.ml @@ -233,7 +233,7 @@ let check api emergency_calls = [] -> sofar | "closed"::xs -> find_smallest sofar xs (* closed is not a real release, so skip it *) | x::xs -> if release_lt x sofar then find_smallest x xs else find_smallest sofar xs in - find_smallest (getlast release_order) releases in + find_smallest (getlast release_order |> code_name_of_release) releases in let rec check_vsns max_release_sofar ps = match ps with [] -> true diff --git a/ocaml/idl/json_backend/gen_json.ml b/ocaml/idl/json_backend/gen_json.ml index ce70cc4e1fa..b1011268e9e 100644 --- a/ocaml/idl/json_backend/gen_json.ml +++ b/ocaml/idl/json_backend/gen_json.ml @@ -257,7 +257,7 @@ let compare_changes (a_t, a_n, _, a_k) (b_t, b_n, _, b_k) = let releases objs = let changes_in_release rel = let search_obj obj = - let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) obj.obj_lifecycle in let obj_changes = List.map (fun (transition, release, doc) -> transition, @@ -267,7 +267,7 @@ let releases objs = ) changes in let changes_for_msg m = - let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) m.msg_lifecycle in List.map (fun (transition, release, doc) -> transition, obj.name ^ "." 
^ m.msg_name, @@ -280,7 +280,7 @@ let releases objs = let msg_changes = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in let changes_for_field f = - let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) f.lifecycle in let field_name = String.concat "_" f.full_name in List.map (fun (transition, release, doc) -> transition, @@ -302,7 +302,7 @@ let releases objs = in JArray (List.map search_obj objs |> List.flatten |> List.sort compare_changes |> List.map jobject_of_change) in - let release_info = JObject (List.map (fun rel -> rel, changes_in_release rel) release_order) in + let release_info = JObject (List.map (fun rel -> code_name_of_release rel, changes_in_release rel) release_order) in Stdext.Unixext.write_string_to_file ("release_info.json") (string_of_json 0 release_info) let _ = From 25b42f94516fb731a8f40e71eaa9b9680ddf9eba Mon Sep 17 00:00:00 2001 From: Konstantina Chremmou Date: Fri, 19 May 2017 12:08:25 +0100 Subject: [PATCH 42/59] Generate automatically the list of releases for the docs navigation menu. This solution proposes to use mustache templates which can be populated at runtime with the values defined in the datamodel. Thus we avoid duplication of definitions which may as well get out of sync. Also, removed the obsolete XCP branding. 
Signed-off-by: Konstantina Chremmou --- Makefile | 5 +- _oasis | 3 +- ocaml/doc/branding.js | 72 ------------------- ocaml/doc/jsapi.ml | 29 +++++++- .../branding.mustache} | 51 +++---------- opam | 1 + 6 files changed, 41 insertions(+), 120 deletions(-) delete mode 100644 ocaml/doc/branding.js rename ocaml/doc/{xenserver/branding.js => templates/branding.mustache} (51%) diff --git a/Makefile b/Makefile index d32926b3efc..993f999a5ea 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,7 @@ build: setup.data doc: setup.data build $(SETUP) -doc $(DOCFLAGS) - ./jsapi.native -destdir _build/ocaml/doc + ./jsapi.native -destdir _build/ocaml/doc -templdir ocaml/doc/templates test: setup.data build $(SETUP) -test $(TESTFLAGS) @@ -95,5 +95,4 @@ install: setup.data rbac_static.csv mkdir -p $(DESTDIR)$(DOCDIR)/html/xenserver cp -r -L _build/ocaml/doc/api $(DESTDIR)$(DOCDIR)/html/xenserver cd ocaml/doc && cp *.js *.html *.css *.png $(DESTDIR)$(DOCDIR)/html/xenserver - cp ocaml/doc/xenserver/* $(DESTDIR)$(DOCDIR)/html/xenserver - + cp _build/ocaml/doc/branding.js $(DESTDIR)$(DOCDIR)/html/xenserver/branding.js diff --git a/_oasis b/_oasis index 71ec9716c8c..2dc4ab15f8c 100644 --- a/_oasis +++ b/_oasis @@ -424,7 +424,8 @@ Executable jsapi xapi-consts, stdext, uuid, - gzip + gzip, + mustache ############################################################################ diff --git a/ocaml/doc/branding.js b/ocaml/doc/branding.js deleted file mode 100644 index 81da0b1d37d..00000000000 --- a/ocaml/doc/branding.js +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2006-2009 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - */ - -function make_title() { - document.write('Xapi Documentation'); -} - -function make_header(t) { - if (t == 'apidoc') - title = 'Xapi – XenAPI Documentation'; - else if (t == 'codedoc') - title = 'Xapi – OCaml Code Documentation'; - else - title = 'Xapi – Documentation'; - - html = '

XenServer Management API

' - document.getElementById('header').innerHTML = html; -} - -first_release = 'midnight-ride'; - -function get_release_name(s) -{ - switch (s) { - case 'rio': - case 'miami': - case 'symc': - case 'orlando': - case 'orlando-update-1': - case 'george': - case 'midnight-ride': - return 'XCP 0.5'; - case 'cowley': - return 'XCP 1.0'; - case 'boston': - return 'XCP 1.5'; - case 'tampa': - return 'XCP 1.6'; - case 'clearwater': - return 'XenServer 6.2'; - case 'vgpu-tech-preview': - return 'XenServer 6.2 vGPU preview'; - case 'vgpu-productisation': - return 'XenServer 6.2 SP1'; - case 'clearwater-felton': - return 'XenServer 6.2 SP1 Hotfix 4'; - case 'clearwater-whetstone': - return 'XenServer 6.2 SP1 Hotfix 11'; - case 'creedence': - return 'XenServer 6.5'; - case 'cream': - return 'XenServer 6.5 SP1'; - case 'dundee': - return 'XenServer 7.0'; - case 'ely': - return 'XenServer 7.1'; - default: - return (s + ' (unreleased)'); - } -} - diff --git a/ocaml/doc/jsapi.ml b/ocaml/doc/jsapi.ml index 978e4dd708f..0994aaed52d 100644 --- a/ocaml/doc/jsapi.ml +++ b/ocaml/doc/jsapi.ml @@ -12,6 +12,8 @@ * GNU Lesser General Public License for more details. *) +open Stdext +open Pervasiveext open Datamodel_types type change_t = lifecycle_change * string * string @@ -19,10 +21,12 @@ and changes_t = change_t list [@@deriving rpc] let destdir = ref "." +let templdir = ref "" let parse_args () = Arg.parse [ "-destdir", Arg.Set_string destdir, "the destination directory for the generated files"; + "-templdir", Arg.Set_string templdir, "the directory with the template (mustache) files"; ] (fun x-> Printf.printf "Ignoring anonymous argument %s" x) ("Generates documentation for the datamodel classes. 
See -help.") @@ -90,7 +94,30 @@ let generate_files destdir = let release_list = String.concat ", " (List.map (fun s -> "'" ^ (code_name_of_release s) ^ "'") release_order) in Stdext.Unixext.write_string_to_file (Filename.concat api_dir "releases.json") ("releases = [" ^ release_list ^ "]") +let json_releases = + let json_of_rel x = `O [ + "code_name", `String (code_name_of_release x); + "version_major", `Float (float_of_int x.version_major); + "version_minor", `Float (float_of_int x.version_minor); + "branding", `String x.branding; + ] + in + `O [ "releases", `A (List.map json_of_rel release_order) ] + +let render_template template_file json output_file = + let templ = Stdext.Unixext.string_of_file template_file |> Mustache.of_string in + let rendered = Mustache.render templ json in + let out_chan = open_out output_file in + finally (fun () -> output_string out_chan rendered) + (fun () -> close_out out_chan) + +let populate_releases templates_dir dest_dir= + let inpath x = Filename.concat templates_dir x in + let outpath x = Filename.concat dest_dir x in + let render (infile, outfile) = render_template (inpath infile) json_releases (outpath outfile) in + [ "branding.mustache", "branding.js"] |> List.iter render let _ = parse_args (); - generate_files !destdir \ No newline at end of file + generate_files !destdir; + populate_releases !templdir !destdir diff --git a/ocaml/doc/xenserver/branding.js b/ocaml/doc/templates/branding.mustache similarity index 51% rename from ocaml/doc/xenserver/branding.js rename to ocaml/doc/templates/branding.mustache index 2095f961822..3924445efe5 100644 --- a/ocaml/doc/xenserver/branding.js +++ b/ocaml/doc/templates/branding.mustache @@ -24,49 +24,14 @@ function make_header(t) { first_release = 'rio'; -function get_release_name(s) -{ +function get_release_name(s) { switch (s) { - case 'rio': - return 'XenServer 4.0'; - case 'miami': - return 'XenServer 4.1'; - case 'symc': - return 'XenServer 4.1.1'; - case 'orlando': - return 
'XenServer 5.0'; - case 'orlando-update-1': - return 'XenServer 5.0 Update 1'; - case 'george': - return 'XenServer 5.5'; - case 'midnight-ride': - return 'XenServer 5.6'; - case 'cowley': - return 'XenServer 5.6 FP1'; - case 'boston': - return 'XenServer 6.0'; - case 'tampa': - return 'XenServer 6.1'; - case 'clearwater': - return 'XenServer 6.2'; - case 'vgpu-tech-preview': - return 'XenServer 6.2 vGPU preview'; - case 'vgpu-productisation': - return 'XenServer 6.2 SP1'; - case 'clearwater-felton': - return 'XenServer 6.2 SP1 Hotfix 4'; - case 'clearwater-whetstone': - return 'XenServer 6.2 SP1 Hotfix 11'; - case 'creedence': - return 'XenServer 6.5'; - case 'cream': - return 'XenServer 6.5 SP1'; - case 'dundee': - return 'XenServer 7.0'; - case 'ely': - return 'XenServer 7.1'; - default: - return 'Unreleased'; - } +{{#releases}} + case '{{code_name}}': + return '{{branding}}'; +{{/releases}} + default: + return 'Unreleased'; + } } diff --git a/opam b/opam index f6bdd8296a8..7a4606eb86f 100644 --- a/opam +++ b/opam @@ -52,6 +52,7 @@ depends: [ "pci" "sha" "xapi-xenopsd" + "mustache" ] depexts: [ [["centos"] ["pam-devel"]] From b7f582adbf9a752f0355076e86383ba88a8b61b8 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Tue, 16 May 2017 17:57:04 +0100 Subject: [PATCH 43/59] Factor out duplication into Cli_util.is_valid_ref Signed-off-by: Gabor Igloi --- ocaml/xapi/cli_operations.ml | 11 ++--------- ocaml/xapi/cli_util.ml | 4 ++++ 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/ocaml/xapi/cli_operations.ml b/ocaml/xapi/cli_operations.ml index 7a0c42091d7..09c783c79e4 100644 --- a/ocaml/xapi/cli_operations.ml +++ b/ocaml/xapi/cli_operations.ml @@ -2402,11 +2402,7 @@ let vm_install_real printer rpc session_id template name description params = let suspend_sr_ref = match sr_ref with | Some sr -> - let ref_is_valid = Server_helpers.exec_with_new_task - ~session_id "Checking suspend_SR validity" - (fun __context -> Db.is_valid_ref __context sr) - in - if 
ref_is_valid then + if Cli_util.is_valid_ref session_id sr then (* sr-uuid and/or sr-name-label was specified - use this as the suspend_SR *) sr else @@ -4342,10 +4338,7 @@ let update_upload fd printer rpc session_id params = then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) else begin let sr = Client.Pool.get_default_SR ~rpc ~session_id ~self:(List.hd pools) in - let ref_is_valid = Server_helpers.exec_with_new_task - ~session_id "Checking default SR validity" - (fun __context -> Db.is_valid_ref __context sr) in - if ref_is_valid then sr + if Cli_util.is_valid_ref session_id sr then sr else failwith "No sr-uuid parameter was given, and the pool's default SR \ is unspecified or invalid. Please explicitly specify the SR to use \ in the sr-uuid parameter, or set the pool's default SR." diff --git a/ocaml/xapi/cli_util.ml b/ocaml/xapi/cli_util.ml index 1058373da6d..fedab52002e 100644 --- a/ocaml/xapi/cli_util.ml +++ b/ocaml/xapi/cli_util.ml @@ -178,6 +178,10 @@ let ref_convert x = | Some ir -> ir.Ref_index.uuid^(match ir.Ref_index.name_label with None->"" | Some x -> " ("^x^")") +let is_valid_ref session_id ref = + Server_helpers.exec_with_new_task + ~session_id "Checking validity of reference" + (fun __context -> Db.is_valid_ref __context ref) (* Marshal an API-style server-error *) let get_server_error code params = From b445af3aa03d189fbb87f121f33b41d2ad97641c Mon Sep 17 00:00:00 2001 From: Liang Dai Date: Tue, 9 May 2017 10:06:15 +0000 Subject: [PATCH 44/59] CA-252876: AD group name with parenthesis not work as expect in XenServer 7.0 pool This also fixes CP-22274 Replace string_trim with built-in String.trim Signed-off-by: Liang Dai --- ocaml/xapi/extauth_plugin_ADpbis.ml | 57 +++++++++++------------- ocaml/xapi/test_extauth_plugin_ADpbis.ml | 38 +++++++++++++++- 2 files changed, 62 insertions(+), 33 deletions(-) diff --git a/ocaml/xapi/extauth_plugin_ADpbis.ml b/ocaml/xapi/extauth_plugin_ADpbis.ml index d1ea8bd7297..25f825c7175 100644 --- 
a/ocaml/xapi/extauth_plugin_ADpbis.ml +++ b/ocaml/xapi/extauth_plugin_ADpbis.ml @@ -18,8 +18,10 @@ module D = Debug.Make(struct let name="extauth_plugin_ADpbis" end) open D +open Stdext.Xstringext + let match_error_tag (lines:string list) = - let err_catch_list = + let err_catch_list = [ "DNS_ERROR_BAD_PACKET", Auth_signature.E_LOOKUP; "LW_ERROR_PASSWORD_MISMATCH", Auth_signature.E_CREDENTIALS; "LW_ERROR_INVALID_ACCOUNT", Auth_signature.E_INVALID_ACCOUNT; @@ -27,26 +29,35 @@ let match_error_tag (lines:string list) = "LW_ERROR_DOMAIN_IS_OFFLINE", Auth_signature.E_UNAVAILABLE; "LW_ERROR_INVALID_OU", Auth_signature.E_INVALID_OU; (* More errors to be caught here *) - ] + ] in - let split_to_words = fun str -> - let open Stdext.Xstringext in - let seps = ['('; ')'; ' '; '\t'; '.'] in - String.split_f (fun s -> List.exists (fun sep -> sep = s) seps) str + let split_to_words = fun str -> + let seps = ['('; ')'; ' '; '\t'; '.'] in + String.split_f (fun s -> List.exists (fun sep -> sep = s) seps) str in let rec has_err lines err_pattern = match lines with | [] -> false - | line :: rest -> - try + | line :: rest -> + try ignore(List.find (fun w -> w = err_pattern) (split_to_words line)); true with Not_found -> has_err rest err_pattern in - try + try let (_, errtag) = List.find (fun (err_pattern, _) -> has_err lines err_pattern) err_catch_list in errtag - with Not_found -> Auth_signature.E_GENERIC + with Not_found -> Auth_signature.E_GENERIC + +let extract_sid_from_group_list = fun group_list -> + List.map (fun (n,v)-> + let v = String.replace ")" "" v in + let v = String.replace "sid =" "|" v in + let vs = String.split_f (fun c -> c = '|') v in + let sid = String.trim (List.nth vs 1) in + debug "extract_sid_from_group_list get sid=[%s]" sid; + sid + ) (List.filter (fun (n,v)->n="") group_list) module AuthADlw : Auth_signature.AUTH_MODULE = struct @@ -64,17 +75,6 @@ struct let splitlines s = String.split_f (fun c -> c = '\n') (String.replace "#012" "\n" s) - let rec 
string_trim s = - let l = String.length s in - if l = 0 then - s - else if s.[0] = ' ' || s.[0] = '\t' || s.[0] = '\n' || s.[0] = '\r' then - string_trim (String.sub s 1 (l-1)) - else if s.[l-1] = ' ' || s.[l-1] = '\t' || s.[l-1] = '\n' || s.[l-1] = '\r' then - string_trim (String.sub s 0 (l-1)) - else - s - let pbis_common_with_password (password:string) (pbis_cmd:string) (pbis_args:string list) = let debug_cmd = pbis_cmd ^ " " ^ (List.fold_left (fun p pp -> p^" "^pp) " " pbis_args) in try @@ -241,8 +241,8 @@ struct debug "parse %s: currkey=[%s] line=[%s]" debug_cmd currkey line; if List.length slices > 1 then begin - let key = string_trim (List.hd slices) in - let value = string_trim (List.nth slices 1) in + let key = String.trim (List.hd slices) in + let value = String.trim (List.nth slices 1) in debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; if String.length value > 0 then (acc @ [(key, value)], "") @@ -251,7 +251,7 @@ struct end else let key = currkey in - let value = string_trim line in + let value = String.trim line in debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; (acc @ [(key, value)], currkey) ) in @@ -353,14 +353,7 @@ struct And pbis_common will return subject_attrs as [("Number of groups found for user 'test@testdomain'", "2"), ("", line1), ("", line2) ... 
("", lineN)] *) - List.map (fun (n,v)-> - let v = String.replace ")" "|" v in - let v = String.replace "sid =" "|" v in - let vs = String.split_f (fun c -> c = '|') v in - let sid = string_trim (List.nth vs 1) in - debug "pbis_get_group_sids_byname %s get sid=[%s]" _subject_name sid; - sid - ) (List.filter (fun (n,v)->n="") subject_attrs) + extract_sid_from_group_list subject_attrs let pbis_get_sid_bygid gid = diff --git a/ocaml/xapi/test_extauth_plugin_ADpbis.ml b/ocaml/xapi/test_extauth_plugin_ADpbis.ml index 293306d16dc..964dd1d1dde 100644 --- a/ocaml/xapi/test_extauth_plugin_ADpbis.ml +++ b/ocaml/xapi/test_extauth_plugin_ADpbis.ml @@ -15,7 +15,7 @@ open OUnit open Test_highlevel -module PbisAuthErrorsCatch= Generic.Make(struct +module PbisAuthErrorsCatch = Generic.Make(struct module Io = struct type input_t = string list type output_t = Auth_signature.auth_service_error_tag @@ -55,9 +55,45 @@ module PbisAuthErrorsCatch= Generic.Make(struct ] end) +module PbisExtractSid = Generic.Make(struct + module Io = struct + type input_t = (string * string) list + type output_t = string list + + let string_of_input_t = Test_printers.(list (pair string string)) + let string_of_output_t = Test_printers.(list string) + end + + let transform = Extauth_plugin_ADpbis.extract_sid_from_group_list + + let tests = [ + [(" ", " ")], []; + + [("Exception","Remote connection shutdown!")], []; + + [("Number of groups found for user 'testAD@BLE'", "0"); + ("Error", "No record found")], + []; + + [("Number of groups found for user 'admin@NVC'", "1"); + ("", "Group[1 of 1] name = NVC\\testg(ab) (gid = 564135020, sid = S-1-5-21-1171552557-368733809-2946345504-1132)")], + ["S-1-5-21-1171552557-368733809-2946345504-1132"]; + + [("Number of groups found for user 'cnk3@UN'", "1"); + ("", "Group[1 of 1] name = UN\\KnmOJ (gid = 492513842, sid = S-1-5-31-5921451325-154521381-3135732118-4527)")], + ["S-1-5-31-5921451325-154521381-3135732118-4527"]; + + [("Number of groups found for user 
'test@testdomain'", "2"); + ("", "Group[1 of 2] name = testdomain\\dnsadmins (gid = 580912206, sid = S-1-5-21-791009147-1041474540-2433379237-1102)"); + ("", "Group[2 of 2] name = testdomain\\domain+users (gid = 580911617, sid = S-1-5-21-791009147-1041474540-2433379237-513)")], + ["S-1-5-21-791009147-1041474540-2433379237-1102"; "S-1-5-21-791009147-1041474540-2433379237-513"]; + ] + end) + let test = "test_extauth_ADpbis" >::: [ "test_pbis_auth_errors_catch" >::: PbisAuthErrorsCatch.tests; + "test_pbis_extract_sid" >::: PbisExtractSid.tests; ] From 4fbaf2f21f1ba9cfb2e8cdf47a368778ae1d501b Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 15:37:04 +0100 Subject: [PATCH 45/59] CA-242706: Add "warning" comment explaining issue Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_mgmt_iface.ml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ocaml/xapi/xapi_mgmt_iface.ml b/ocaml/xapi/xapi_mgmt_iface.ml index 1529dd04113..cddd1f2c9c1 100644 --- a/ocaml/xapi/xapi_mgmt_iface.ml +++ b/ocaml/xapi/xapi_mgmt_iface.ml @@ -185,6 +185,8 @@ let on_dom0_networking_change ~__context = Db.Host.set_name_label ~__context ~self:localhost ~value:new_hostname; begin match Helpers.get_management_ip_addr ~__context with | Some ip -> + (* WARNING: this does NOT detect IP address changes that happen before + xapi's startup (see CA-242706) *) if Db.Host.get_address ~__context ~self:localhost <> ip then begin debug "Changing Host.address in database to: %s" ip; Db.Host.set_address ~__context ~self:localhost ~value:ip; From f4eda00fb364db531e58477c7639850254eb5d4b Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 16:27:20 +0100 Subject: [PATCH 46/59] Add inferred mli file for Xapi_ha Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_ha.mli | 87 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 ocaml/xapi/xapi_ha.mli diff --git a/ocaml/xapi/xapi_ha.mli b/ocaml/xapi/xapi_ha.mli new file mode 100644 index 
00000000000..0393e40f7d3 --- /dev/null +++ b/ocaml/xapi/xapi_ha.mli @@ -0,0 +1,87 @@ +module D : Debug.DEBUG +module Rrdd = Rrd_client.Client +val ha_redo_log : Redo_log.redo_log +val query_liveset : unit -> Xha_interface.LiveSetInformation.t +val i_have_statefile_access : unit -> bool +val propose_master : unit -> bool +val local_failover_decisions_are_ok : unit -> bool +val write_uuid_to_ip_mapping : __context:Context.t -> unit +val get_uuid_to_ip_mapping : unit -> (string * string) list +val address_of_host_uuid : string -> string +val uuid_of_host_address : string -> string +val on_master_failure : unit -> unit +module Timeouts : + sig + type t = { + heart_beat_interval : int; + state_file_interval : int; + heart_beat_timeout : int; + state_file_timeout : int; + heart_beat_watchdog_timeout : int; + state_file_watchdog_timeout : int; + boot_join_timeout : int; + enable_join_timeout : int; + xapi_healthcheck_timeout : int; + xapi_healthcheck_interval : int; + xapi_restart_timeout : int; + xapi_restart_attempts : int; + } + val derive : int -> t + val get_base_t : __context:Context.t -> int + end +module Monitor : + sig + val request_shutdown : bool ref + val prevent_failover_actions_until : float ref + val block_delay_calls : bool ref + val block_delay_calls_c : Condition.t + val m : Stdext.Threadext.Mutex.t + val delay : Stdext.Threadext.Delay.t + val thread : Stdext.Threadext.Thread.t option ref + val thread_m : Stdext.Threadext.Mutex.t + val database_state_valid : bool ref + val database_state_valid_c : Condition.t + val plan_out_of_date : bool ref + exception Already_started + exception Not_started + val ha_monitor : unit -> unit + val prevent_restarts_for : int64 -> unit + val start : unit -> unit + val signal_database_state_valid : unit -> unit + val stop : unit -> unit + end +val ha_prevent_restarts_for : 'a -> int64 -> unit +val redo_log_ha_enabled_during_runtime : Context.t -> unit +val redo_log_ha_disabled_during_runtime : Context.t -> unit +val 
redo_log_ha_enabled_at_startup : unit -> unit +val on_server_restart : unit -> unit +val on_database_engine_ready : unit -> unit +val ha_disable_failover_decisions : 'a -> 'b -> unit +val ha_disarm_fencing : 'a -> 'b -> unit +val ha_set_excluded : 'a -> 'b -> unit +val ha_stop_daemon : 'a -> 'b -> unit +val emergency_ha_disable : 'a -> bool -> unit +val ha_release_resources : Context.t -> 'a -> unit +val ha_wait_for_shutdown_via_statefile : 'a -> 'b -> unit +val attach_statefiles : + __context:Context.t -> [ `VDI ] API.Ref.t list -> string list +val attach_metadata_vdi : __context:Context.t -> [ `VDI ] API.Ref.t -> string +val write_config_file : __context:Context.t -> string list -> string -> unit +val preconfigure_host : + Context.t -> + [ `host ] API.Ref.t -> + [ `VDI ] API.Ref.t list -> [ `VDI ] API.Ref.t -> string -> unit +val join_liveset : 'a -> 'b Ref.t -> unit +val proposed_master : string option ref +val proposed_master_time : float ref +val proposed_master_m : Stdext.Threadext.Mutex.t +val propose_new_master_internal : + __context:'a -> address:string -> manual:'b -> unit +val propose_new_master : __context:'a -> address:string -> manual:'b -> unit +val commit_new_master : __context:Context.t -> address:string -> unit +val abort_new_master : __context:'a -> address:string -> unit +val disable_internal : Context.t -> unit +val disable : Context.t -> unit +val enable : + Context.t -> [ `SR ] API.Ref.t list -> (string * string) list -> unit +val before_clean_shutdown_or_reboot : __context:Context.t -> host:'a -> unit From f914603f0d5a38a247df977c2cd60faf8c332c0c Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 16:55:34 +0100 Subject: [PATCH 47/59] xapi_ha.mli: restrict to values used externally Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_ha.mli | 60 ------------------------------------------ 1 file changed, 60 deletions(-) diff --git a/ocaml/xapi/xapi_ha.mli b/ocaml/xapi/xapi_ha.mli index 0393e40f7d3..65754e7146a 100644 --- 
a/ocaml/xapi/xapi_ha.mli +++ b/ocaml/xapi/xapi_ha.mli @@ -1,86 +1,26 @@ -module D : Debug.DEBUG -module Rrdd = Rrd_client.Client val ha_redo_log : Redo_log.redo_log -val query_liveset : unit -> Xha_interface.LiveSetInformation.t -val i_have_statefile_access : unit -> bool -val propose_master : unit -> bool -val local_failover_decisions_are_ok : unit -> bool -val write_uuid_to_ip_mapping : __context:Context.t -> unit -val get_uuid_to_ip_mapping : unit -> (string * string) list -val address_of_host_uuid : string -> string -val uuid_of_host_address : string -> string -val on_master_failure : unit -> unit -module Timeouts : - sig - type t = { - heart_beat_interval : int; - state_file_interval : int; - heart_beat_timeout : int; - state_file_timeout : int; - heart_beat_watchdog_timeout : int; - state_file_watchdog_timeout : int; - boot_join_timeout : int; - enable_join_timeout : int; - xapi_healthcheck_timeout : int; - xapi_healthcheck_interval : int; - xapi_restart_timeout : int; - xapi_restart_attempts : int; - } - val derive : int -> t - val get_base_t : __context:Context.t -> int - end module Monitor : sig - val request_shutdown : bool ref - val prevent_failover_actions_until : float ref - val block_delay_calls : bool ref - val block_delay_calls_c : Condition.t - val m : Stdext.Threadext.Mutex.t - val delay : Stdext.Threadext.Delay.t - val thread : Stdext.Threadext.Thread.t option ref - val thread_m : Stdext.Threadext.Mutex.t - val database_state_valid : bool ref - val database_state_valid_c : Condition.t val plan_out_of_date : bool ref - exception Already_started - exception Not_started - val ha_monitor : unit -> unit - val prevent_restarts_for : int64 -> unit - val start : unit -> unit - val signal_database_state_valid : unit -> unit val stop : unit -> unit end val ha_prevent_restarts_for : 'a -> int64 -> unit -val redo_log_ha_enabled_during_runtime : Context.t -> unit -val redo_log_ha_disabled_during_runtime : Context.t -> unit -val redo_log_ha_enabled_at_startup 
: unit -> unit val on_server_restart : unit -> unit val on_database_engine_ready : unit -> unit val ha_disable_failover_decisions : 'a -> 'b -> unit val ha_disarm_fencing : 'a -> 'b -> unit -val ha_set_excluded : 'a -> 'b -> unit val ha_stop_daemon : 'a -> 'b -> unit val emergency_ha_disable : 'a -> bool -> unit val ha_release_resources : Context.t -> 'a -> unit val ha_wait_for_shutdown_via_statefile : 'a -> 'b -> unit -val attach_statefiles : - __context:Context.t -> [ `VDI ] API.Ref.t list -> string list -val attach_metadata_vdi : __context:Context.t -> [ `VDI ] API.Ref.t -> string -val write_config_file : __context:Context.t -> string list -> string -> unit val preconfigure_host : Context.t -> [ `host ] API.Ref.t -> [ `VDI ] API.Ref.t list -> [ `VDI ] API.Ref.t -> string -> unit val join_liveset : 'a -> 'b Ref.t -> unit -val proposed_master : string option ref -val proposed_master_time : float ref -val proposed_master_m : Stdext.Threadext.Mutex.t -val propose_new_master_internal : - __context:'a -> address:string -> manual:'b -> unit val propose_new_master : __context:'a -> address:string -> manual:'b -> unit val commit_new_master : __context:Context.t -> address:string -> unit val abort_new_master : __context:'a -> address:string -> unit -val disable_internal : Context.t -> unit val disable : Context.t -> unit val enable : Context.t -> [ `SR ] API.Ref.t list -> (string * string) list -> unit From 3c6b5869e2523b9773e89fd77e98ab1c91cb5460 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 17:31:43 +0100 Subject: [PATCH 48/59] xapi_ha.mli: remove unused Not_started exception Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_ha.ml | 1 - ocaml/xapi/xapi_ha.mli | 7 +++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index 245889f7ad2..79fa608aef3 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -240,7 +240,6 @@ module Monitor = struct let plan_out_of_date = ref true exception 
Already_started - exception Not_started (** Background thread which monitors the membership set and takes action if HA is armed and something goes wrong *) diff --git a/ocaml/xapi/xapi_ha.mli b/ocaml/xapi/xapi_ha.mli index 65754e7146a..bc7ef8f1b04 100644 --- a/ocaml/xapi/xapi_ha.mli +++ b/ocaml/xapi/xapi_ha.mli @@ -1,6 +1,13 @@ +(** Functions for implementing 'High Availability' (HA). + @group High Availability (HA) *) + val ha_redo_log : Redo_log.redo_log +(** The redo log instance used for HA *) + module Monitor : sig + (** Control the background HA monitoring thread *) + val plan_out_of_date : bool ref val stop : unit -> unit end From 90d3e0a469955319f8f49d797a13f50ca3ed53e6 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 19:01:36 +0100 Subject: [PATCH 49/59] xapi_ha.mli: add ocamldoc comments Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_ha.ml | 44 +++------------------- ocaml/xapi/xapi_ha.mli | 84 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 88 insertions(+), 40 deletions(-) diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index 79fa608aef3..8ee2c57b8ea 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -11,11 +11,11 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -(** Functions for implementing 'High Availability' (HA). File is divided into 3 sections: - + scripts and functions which form part of the HA subsystem interface - + internal API calls used for arming and disarming individual hosts - + external API calls (Pool.enable_ha, Pool.disable_ha) used for turning on/off HA pool-wide - * @group High Availability (HA) + +(* Functions for implementing 'High Availability' (HA). 
File is divided into 3 sections: + + scripts and functions which form part of the HA subsystem interface + + internal API calls used for arming and disarming individual hosts + + external API calls (Pool.enable_ha, Pool.disable_ha) used for turning on/off HA pool-wide *) module D = Debug.Make(struct let name="xapi_ha" end) @@ -218,7 +218,6 @@ module Timeouts = struct end module Monitor = struct - (** Control the background HA monitoring thread *) let request_shutdown = ref false let prevent_failover_actions_until = ref 0. (* protected by the request_shutdown_m too *) @@ -236,7 +235,6 @@ module Monitor = struct let database_state_valid = ref false let database_state_valid_c = Condition.create () - (* Used to explicitly signal that we should replan *) let plan_out_of_date = ref true exception Already_started @@ -623,7 +621,6 @@ module Monitor = struct end -(** Called by MTC in Orlando Update 1 to temporarily block the VM restart thread. *) let ha_prevent_restarts_for __context seconds = (* Even if HA is not enabled, this should still go ahead (rather than doing * a successful no-op) in case HA is about to be enabled within the specified @@ -681,14 +678,6 @@ let redo_log_ha_enabled_at_startup () = (* ----------------------------- *) -(** Called when xapi restarts: server may be in emergency mode at this point. We need - to inspect the local configuration and if HA is supposed to be armed we need to - set everything up. - Note that - the master shouldn't be able to activate HA while we are offline since that would cause - us to come up with a broken configuration (the enable-HA stage has the critical task of - synchronising the HA configuration on all the hosts). So really we only want to notice - if the Pool has had HA disabled while we were offline. 
*) let on_server_restart () = let armed = bool_of_string (Localdb.get Constants.ha_armed) in @@ -782,9 +771,6 @@ let on_server_restart () = (* We signal the monitor that the database state is valid (wrt liveness + disabledness of hosts) later *) end -(** Called in the master xapi startup when the database is ready. We set all hosts (including this one) to - disabled then signal the monitor thread to look. It can then wait for slaves to turn up - before trying to restart VMs. *) let on_database_engine_ready () = info "Setting all hosts to dead and disabled. Hosts must re-enable themselves explicitly"; Server_helpers.exec_with_new_task "Setting all hosts to dead and disabled" @@ -802,8 +788,6 @@ let on_database_engine_ready () = (*********************************************************************************************) (* Internal API calls to configure individual hosts *) -(** Internal API call to prevent this node making an unsafe failover decision. - This call is idempotent. *) let ha_disable_failover_decisions __context localhost = debug "Disabling failover decisions"; (* FIST *) @@ -813,10 +797,6 @@ let ha_disable_failover_decisions __context localhost = end; Localdb.put Constants.ha_disable_failover_decisions "true" -(** Internal API call to disarm localhost. - If the daemon is missing then we return success. Either fencing was previously disabled and the - daemon has shutdown OR the daemon has died and this node will fence shortly... -*) let ha_disarm_fencing __context localhost = try let (_ : string) = call_script ha_disarm_fencing [] in () @@ -826,13 +806,10 @@ let ha_disarm_fencing __context localhost = let ha_set_excluded __context localhost = let (_ : string) = call_script ha_set_excluded [] in () -(** Internal API call to stop the HA daemon. - This call is idempotent. 
*) let ha_stop_daemon __context localhost = Monitor.stop (); let (_ : string) = call_script ha_stop_daemon [] in () -(** Emergency-mode API call to disarm localhost *) let emergency_ha_disable __context soft = let ha_armed = try bool_of_string (Localdb.get Constants.ha_armed) with _ -> false in if not ha_armed then @@ -861,9 +838,6 @@ let emergency_ha_disable __context soft = if not soft then Localdb.put Constants.ha_armed "false"; end -(** Internal API call to release any HA resources after the system has - been shutdown. This call is idempotent. Modified for CA-48539 to - call vdi.deactivate before vdi.detach. *) let ha_release_resources __context localhost = Monitor.stop (); @@ -890,10 +864,6 @@ let ha_release_resources __context localhost = (* At this point a restart won't enable the HA subsystem *) Localdb.put Constants.ha_armed "false" -(** Internal API call which blocks until this node's xHA daemon spots the invalid statefile - and exits cleanly. If the daemon survives but the statefile access is lost then this function - will return an exception and the no-statefile shutdown can be attempted. 
-*) let ha_wait_for_shutdown_via_statefile __context localhost = try while true do @@ -991,7 +961,6 @@ let write_config_file ~__context statevdi_paths generation = (Xha_interface.DaemonConfiguration.to_xml_string config); debug "%s file written" Xha_interface.DaemonConfiguration.filename -(** Internal API call to preconfigure localhost *) let preconfigure_host __context localhost statevdis metadata_vdi generation = info "Host.preconfigure_ha host = %s; statevdis = [ %s ]; generation = %s" (Ref.string_of localhost) (String.concat "; " (List.map Ref.string_of statevdis)) generation; @@ -1119,12 +1088,10 @@ let rec propose_new_master_internal ~__context ~address ~manual = proposed_master := Some address; proposed_master_time := Unix.gettimeofday () -(* First phase of a two-phase commit of a new master *) let propose_new_master ~__context ~address ~manual = Mutex.execute proposed_master_m (fun () -> propose_new_master_internal ~__context ~address ~manual) -(* Second phase of a two-phase commit of a new master *) let commit_new_master ~__context ~address = begin match !proposed_master with | Some x when x <> address -> @@ -1534,7 +1501,6 @@ let enable __context heartbeat_srs configuration = raise exn -(* Called before shutting down or rebooting a host *) let before_clean_shutdown_or_reboot ~__context ~host = let pool = Helpers.get_pool ~__context in if Db.Pool.get_ha_enabled ~__context ~self:pool then begin diff --git a/ocaml/xapi/xapi_ha.mli b/ocaml/xapi/xapi_ha.mli index bc7ef8f1b04..32d0a579c42 100644 --- a/ocaml/xapi/xapi_ha.mli +++ b/ocaml/xapi/xapi_ha.mli @@ -1,34 +1,116 @@ +(* + * Copyright (C) 2017 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + (** Functions for implementing 'High Availability' (HA). @group High Availability (HA) *) val ha_redo_log : Redo_log.redo_log (** The redo log instance used for HA *) +(******************************************************************************) +(** {2 Interface with the low-level HA subsystem} *) + module Monitor : sig (** Control the background HA monitoring thread *) val plan_out_of_date : bool ref + (** Used to explicitly signal that we should replan *) + val stop : unit -> unit end + val ha_prevent_restarts_for : 'a -> int64 -> unit +(** Called by MTC in Orlando Update 1 to temporarily block the VM restart thread. *) + val on_server_restart : unit -> unit +(** Called when xapi restarts: server may be in emergency mode at this point. + We need to inspect the local configuration and if HA is supposed to be armed + we need to set everything up. + Note that the master shouldn't be able to activate HA while we are offline + since that would cause us to come up with a broken configuration (the + enable-HA stage has the critical task of synchronising the HA configuration + on all the hosts). So really we only want to notice if the Pool has had + HA disabled while we were offline. *) + val on_database_engine_ready : unit -> unit +(** Called in the master xapi startup when the database is ready. We set all + hosts (including this one) to disabled, then signal the monitor thread to look. + It can then wait for slaves to turn up before trying to restart VMs. 
*) + +(******************************************************************************) +(** {2 Internal API calls to configure individual hosts} *) + val ha_disable_failover_decisions : 'a -> 'b -> unit +(** Internal API call to prevent this node making an unsafe failover decision. + This call is idempotent. *) + val ha_disarm_fencing : 'a -> 'b -> unit +(** Internal API call to disarm localhost. If the daemon is missing then we + return success. Either fencing was previously disabled and the daemon has + shutdown OR the daemon has died and this node will fence shortly... +*) + val ha_stop_daemon : 'a -> 'b -> unit +(** Internal API call to stop the HA daemon. This call is idempotent. *) + val emergency_ha_disable : 'a -> bool -> unit +(** Emergency-mode API call to disarm localhost *) + val ha_release_resources : Context.t -> 'a -> unit +(** Internal API call to release any HA resources after the system has been + shutdown. This call is idempotent. Modified for CA-48539 to call + vdi.deactivate before vdi.detach. *) + val ha_wait_for_shutdown_via_statefile : 'a -> 'b -> unit +(** Internal API call which blocks until this node's xHA daemon spots the + invalid statefile and exits cleanly. If the daemon survives but the + statefile access is lost then this function will return an exception and + the no-statefile shutdown can be attempted. 
+*) + val preconfigure_host : Context.t -> [ `host ] API.Ref.t -> - [ `VDI ] API.Ref.t list -> [ `VDI ] API.Ref.t -> string -> unit + [ `VDI ] API.Ref.t list -> + [ `VDI ] API.Ref.t -> + string -> unit +(** Internal API call to preconfigure localhost *) + val join_liveset : 'a -> 'b Ref.t -> unit + val propose_new_master : __context:'a -> address:string -> manual:'b -> unit +(** First phase of a two-phase commit of a new master *) + val commit_new_master : __context:Context.t -> address:string -> unit +(** Second phase of a two-phase commit of a new master *) + val abort_new_master : __context:'a -> address:string -> unit + +(******************************************************************************) +(** {2 External API calls} *) + +(** {3 Pool.*_ha API calls} *) + val disable : Context.t -> unit + val enable : Context.t -> [ `SR ] API.Ref.t list -> (string * string) list -> unit + +(** {3 Functions called by host.* API calls} *) + val before_clean_shutdown_or_reboot : __context:Context.t -> host:'a -> unit +(** Called before shutting down or rebooting a host + (called by the host.shutdown, host.reboot API functions). *) + From 10b5068f00e4bc51001fdde2e942f46862404319 Mon Sep 17 00:00:00 2001 From: minglumlu Date: Tue, 23 May 2017 10:55:44 +0100 Subject: [PATCH 50/59] CA-223802 [xso-672] When existing network is deactivated and new network is added, 'Networking' tab of the VM shows incorrect information. 
Signed-off-by: minglumlu --- ocaml/xapi/suite.ml | 1 + ocaml/xapi/test_guest_agent.ml | 371 +++++++++++++++++++++++++++++++++ ocaml/xapi/xapi_guest_agent.ml | 63 +++++- 3 files changed, 429 insertions(+), 6 deletions(-) create mode 100644 ocaml/xapi/test_guest_agent.ml diff --git a/ocaml/xapi/suite.ml b/ocaml/xapi/suite.ml index 4ada58f8d64..aa819a4510d 100644 --- a/ocaml/xapi/suite.ml +++ b/ocaml/xapi/suite.ml @@ -60,6 +60,7 @@ let base_suite = Test_sdn_controller.test; Test_event.test; Test_extauth_plugin_ADpbis.test; + Test_guest_agent.test; ] let handlers = [ diff --git a/ocaml/xapi/test_guest_agent.ml b/ocaml/xapi/test_guest_agent.ml new file mode 100644 index 00000000000..0d17a6452b8 --- /dev/null +++ b/ocaml/xapi/test_guest_agent.ml @@ -0,0 +1,371 @@ +(* + * Copyright (C) Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ *) + +open OUnit +open Test_highlevel + +module Networks = Generic.Make (struct + module Io = struct + type input_t = string list + type output_t = (string * string) list + + let string_of_input_t = Test_printers.(list string) + let string_of_output_t = Test_printers.(assoc_list string string) + end + + type 'a tree = T of 'a * 'a tree list + + let rec add_path_to_tree (T(root, children)) = function + | [] -> (T(root, children)) + | node :: rest_of_path -> + try + let T(_, children_of_node) = List.find (fun (T(n, _)) -> n = node) children in + let t = add_path_to_tree (T(node, children_of_node)) rest_of_path in + T(root, t :: (List.filter (fun (T(n, _)) -> n <> node) children)) + with Not_found -> + T(root, (add_path_to_tree (T(node, [])) rest_of_path) :: children) + + let construct_tree tree path = + let open Stdext.Xstringext in + let nodes = String.split_f (fun s -> s = '/') path in + add_path_to_tree tree nodes + + let rec list_helper children = function + | [] -> List.map (fun (T(node, _)) -> node) children + | node :: rest_of_path -> + try + let T(_, children_of_node) = List.find (fun (T(n, _)) -> n = node) children in + list_helper children_of_node rest_of_path + with Not_found -> [] + + let list (T(root, children)) path = + let open Stdext.Xstringext in + let nodes = String.split_f (fun s -> s = '/') path in + list_helper children nodes + + + let transform input = + let tree = List.fold_left construct_tree (T("", [])) input in + Xapi_guest_agent.networks "attr" (list tree) + + let tests = [ + (* basic cases *) + [ "attr/vif/0/ipv6/0"; + ], [ "attr/vif/0/ipv6/0", "0/ipv6/0"; + ]; + + [ "attr/vif/0/ipv4/0"; + ], [ "attr/vif/0/ipv4/0", "0/ip"; + "attr/vif/0/ipv4/0", "0/ipv4/0"; + ]; + + [ "attr/eth0/ip"; + ], [ "attr/eth0/ip", "0/ip"; + "attr/eth0/ip", "0/ipv4/0"; + ]; + + [ "attr/eth0/ipv6/0/addr"; + ], [ "attr/eth0/ip", "0/ip"; + "attr/eth0/ip", "0/ipv4/0"; + "attr/eth0/ipv6/0/addr", "0/ipv6/0"; + ]; + + + (* index *) + [ "attr/vif/1/ipv6/2"; + ], [ 
"attr/vif/1/ipv6/2", "1/ipv6/2"; + ]; + + [ "attr/vif/1/ipv4/2"; + ], [ "attr/vif/1/ipv4/2", "1/ip"; + "attr/vif/1/ipv4/2", "1/ipv4/2"; + ]; + + [ "attr/eth1/ip"; + ], [ "attr/eth1/ip", "1/ip"; + "attr/eth1/ip", "1/ipv4/0"; + ]; + + [ "attr/eth1/ipv6/2/addr"; + ], [ "attr/eth1/ip", "1/ip"; + "attr/eth1/ip", "1/ipv4/0"; + "attr/eth1/ipv6/2/addr", "1/ipv6/2"; + ]; + + (* multiple ip addrs *) + [ "attr/vif/0/ipv6/0"; + "attr/vif/0/ipv6/1"; + ], [ "attr/vif/0/ipv6/1", "0/ipv6/1"; + "attr/vif/0/ipv6/0", "0/ipv6/0"; + ]; + + [ "attr/vif/0/ipv4/0"; + "attr/vif/0/ipv4/1"; + ], [ "attr/vif/0/ipv4/1", "0/ipv4/1"; + "attr/vif/0/ipv4/0", "0/ip"; + "attr/vif/0/ipv4/0", "0/ipv4/0"; + ]; + + [ "attr/eth0/ip"; + "attr/eth0/ipv6/0/addr"; + ], [ "attr/eth0/ip", "0/ip"; + "attr/eth0/ip", "0/ipv4/0"; + "attr/eth0/ipv6/0/addr", "0/ipv6/0"; + ]; + + [ "attr/vif/0/ipv4/0"; + "attr/vif/0/ipv6/0"; + ], [ "attr/vif/0/ipv4/0", "0/ip"; + "attr/vif/0/ipv4/0", "0/ipv4/0"; + "attr/vif/0/ipv6/0", "0/ipv6/0"; + ]; + + [ "attr/eth0/ip"; + "attr/vif/0/ipv4/0"; + "attr/eth0/ipv6/0/addr"; + "attr/vif/0/ipv6/0"; + ], [ "attr/vif/0/ipv4/0", "0/ip"; + "attr/vif/0/ipv4/0", "0/ipv4/0"; + "attr/vif/0/ipv6/0", "0/ipv6/0"; + ]; + + (* multiple vifs and multiple ip addrs *) + [ "attr/vif/0/ipv6/0"; + "attr/vif/0/ipv6/1"; + "attr/vif/1/ipv6/0"; + "attr/vif/1/ipv6/1"; + ], [ "attr/vif/0/ipv6/1", "0/ipv6/1"; + "attr/vif/0/ipv6/0", "0/ipv6/0"; + "attr/vif/1/ipv6/1", "1/ipv6/1"; + "attr/vif/1/ipv6/0", "1/ipv6/0"; + ]; + + [ "attr/vif/0/ipv4/0"; + "attr/vif/0/ipv4/1"; + "attr/vif/1/ipv4/0"; + "attr/vif/1/ipv4/1"; + ], [ "attr/vif/0/ipv4/1", "0/ipv4/1"; + "attr/vif/0/ipv4/0", "0/ip"; + "attr/vif/0/ipv4/0", "0/ipv4/0"; + "attr/vif/1/ipv4/1", "1/ipv4/1"; + "attr/vif/1/ipv4/0", "1/ip"; + "attr/vif/1/ipv4/0", "1/ipv4/0"; + ]; + + (* exceptions *) + [ "attr/vif/0/ipv4/a"; + "attr/vif/0/ipv4/1"; + ], []; + ] + end) + +module Initial_guest_metrics = Generic.Make (struct + module Io = struct + type input_t = (string * 
string) list + type output_t = (string * string) list + + let string_of_input_t = Test_printers.(assoc_list string string) + let string_of_output_t = Test_printers.(assoc_list string string) + end + + type 'a mtree = + | Lf of 'a * 'a + | Mt of 'a * 'a mtree list + + let has_name name = function + | Lf (n, _) -> n = name + | Mt (n, _) -> n = name + + let get_name = function + | Lf (n, _) -> n + | Mt (n, _) -> n + + let rec add_leaf_to_mtree paths leaf_value = function + | Lf _ -> raise (Failure "Can't add a leaf on a leaf") + | Mt (root, children) -> + (match paths with + | [] -> + (match children with + | [] -> Lf(root, leaf_value) + | _ -> raise (Failure "Can't add a leaf on a tree node")) + | node :: rest_paths -> + try + let t = List.find (has_name node) children in + (match t with + | Lf (_, _) -> raise (Failure "Can't overwrite an existing leaf") + | Mt (node, children_of_node) -> + let mt = add_leaf_to_mtree rest_paths leaf_value (Mt(node, children_of_node)) in + Mt(root, mt :: (List.filter (fun n -> not (has_name node n)) children))) + with Not_found -> + Mt(root, (add_leaf_to_mtree rest_paths leaf_value (Mt(node, []))) :: children)) + + let construct_mtree mtree (path, leaf_value) = + let open Stdext.Xstringext in + let nodes = String.split_f (fun s -> s = '/') path in + add_leaf_to_mtree nodes leaf_value mtree + + let rec list_helper children = function + | [] -> List.map get_name children + | node :: rest_paths -> + try + match List.find (has_name node) children with + | Lf (_, _) -> [] + | Mt (_, children_of_node) -> list_helper children_of_node rest_paths + with Not_found -> [] + + let list mtree path = + match mtree with + | Lf (_, _) -> [] + | Mt (_, children) -> + let open Stdext.Xstringext in + let paths = String.split_f (fun s -> s = '/') path in + list_helper children paths + + let rec lookup_helper mtree = function + | [] -> + (match mtree with + | Lf (_, v) -> Some v + | Mt (_, _) -> None) + | node :: rest_paths -> + (match mtree with + | Lf 
(l, v) -> lookup_helper (Lf(l, v)) rest_paths + | Mt (_, children) -> + try + lookup_helper (List.find (has_name node) children) rest_paths + with Not_found -> None) + + let lookup mtree path = + let open Stdext.Xstringext in + let paths = String.split_f (fun s -> s = '/') path in + lookup_helper mtree paths + + + let transform input = + let tree = List.fold_left construct_mtree (Mt("", [])) input in + let guest_metrics = Xapi_guest_agent.get_initial_guest_metrics (lookup tree) (list tree) in + guest_metrics.Xapi_guest_agent.networks + + + let tests = [ + (* basic cases *) + [ "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + [ "attr/vif/0/ipv4/0", "192.168.0.1"; + ], [ "0/ip", "192.168.0.1"; + "0/ipv4/0", "192.168.0.1"; + ]; + + [ "attr/eth0/ip", "192.168.0.1"; + ], [ "0/ip", "192.168.0.1"; + "0/ipv4/0", "192.168.0.1"; + ]; + + [ "attr/eth0/ipv6/0/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + (* index *) + [ "attr/vif/1/ipv6/2", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "1/ipv6/2", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + [ "attr/vif/1/ipv4/2", "192.168.0.1"; + ], [ "1/ip", "192.168.0.1"; + "1/ipv4/2", "192.168.0.1"; + ]; + + [ "attr/eth1/ip", "192.168.0.1"; + ], [ "1/ip", "192.168.0.1"; + "1/ipv4/0", "192.168.0.1"; + ]; + + [ "attr/eth1/ipv6/2/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "1/ipv6/2", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + (* multiple ip addrs *) + [ "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + "attr/vif/0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; + ], [ "0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; + "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + [ "attr/vif/0/ipv4/0", "192.168.0.1"; + "attr/vif/0/ipv4/1", "192.168.1.1"; + ], [ "0/ipv4/1", "192.168.1.1"; + "0/ip", "192.168.0.1"; + 
"0/ipv4/0", "192.168.0.1"; + ]; + + [ "attr/eth0/ip", "192.168.0.1"; + "attr/eth0/ipv6/0/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "0/ip", "192.168.0.1"; + "0/ipv4/0", "192.168.0.1"; + "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + [ "attr/vif/0/ipv4/0", "192.168.0.1"; + "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "0/ip", "192.168.0.1"; + "0/ipv4/0", "192.168.0.1"; + "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + [ "attr/eth0/ip", "192.168.0.1"; + "attr/vif/0/ipv4/0", "192.168.0.1"; + "attr/eth0/ipv6/0/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ], [ "0/ip", "192.168.0.1"; + "0/ipv4/0", "192.168.0.1"; + "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + ]; + + (* multiple vifs and multiple ip addrs *) + [ "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + "attr/vif/0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; + "attr/vif/1/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd16"; + "attr/vif/1/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd17"; + ], [ "0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; + "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; + "1/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd17"; + "1/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd16"; + ]; + + [ "attr/vif/0/ipv4/0", "192.168.0.1"; + "attr/vif/0/ipv4/1", "192.168.0.2"; + "attr/vif/1/ipv4/0", "192.168.1.1"; + "attr/vif/1/ipv4/1", "192.168.1.2"; + ], [ "0/ipv4/1", "192.168.0.2"; + "0/ip", "192.168.0.1"; + "0/ipv4/0", "192.168.0.1"; + "1/ipv4/1", "192.168.1.2"; + "1/ip", "192.168.1.1"; + "1/ipv4/0", "192.168.1.1"; + ]; + + (* exceptions *) + [ "attr/vif/0/ipv4/a", "192.168.0.1"; + "attr/vif/0/ipv4/1", "192.168.0.1"; + ], []; + ] + end) + +let test = + "test_guest_agent" >::: + [ + "test_networks" >::: Networks.tests; + "test_get_initial_guest_metrics" >::: Initial_guest_metrics.tests; + ] diff --git 
a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml index 15e681fe5ba..223033dcf0e 100644 --- a/ocaml/xapi/xapi_guest_agent.ml +++ b/ocaml/xapi/xapi_guest_agent.ml @@ -66,6 +66,17 @@ let extend base str = Printf.sprintf "%s/%s" base str * attr/eth0/ip -> 0/ip * attr/eth0/ipv6/0/addr -> 0/ipv6/0 * attr/eth0/ipv6/1/addr -> 0/ipv6/1 + * + * Example output on new xenstore protocol: + * attr/vif/0/ipv4/0 -> 0/ipv4/0 + * attr/vif/0/ipv4/1 -> 0/ipv4/1 + * attr/vif/0/ipv6/0 -> 0/ipv6/0 + * attr/vif/0/ipv6/1 -> 0/ipv6/1 + * + * For the compatibility of XAPI clients, outputs of both protocols + * will be generated. I.E. + * attr/eth0/ip -> 0/ip; 0/ipv4/0 + * attr/vif/0/ipv4/0 -> 0/ip; 0/ipv4/0 * *) let networks path (list: string -> string list) = (* Find all ipv6 addresses under a path. *) @@ -76,10 +87,11 @@ let networks path (list: string -> string list) = (* Find the ipv4 address under a path, and the ipv6 addresses if they exist. *) let find_all_ips path prefix = let ipv4 = (extend path "ip", extend prefix "ip") in + let ipv4_with_idx = (extend path "ip", extend prefix "ipv4/0") in if List.mem "ipv6" (list path) then - ipv4 :: (find_ipv6 (extend path "ipv6") (extend prefix "ipv6")) + ipv4 :: (ipv4_with_idx :: (find_ipv6 (extend path "ipv6") (extend prefix "ipv6"))) else - [ipv4] + [ipv4; ipv4_with_idx] in (* Find all "ethn", "xenbrn" or newer interface standard names * [see https://www.freedesktop.org/wiki/Software/systemd/PredictableNetworkInterfaceNames/] @@ -112,10 +124,49 @@ let networks path (list: string -> string list) = | Some pair -> pair :: acc ) [] (list path) in - path - |> find_eths - |> List.map (fun (path, prefix) -> find_all_ips path prefix) - |> List.concat + let find_vifs vif_path = + let extract_vif acc vif_id = ((extend vif_path vif_id), vif_id) :: acc in + List.fold_left extract_vif [] (list vif_path) + in + let cmp a b = + try + compare (int_of_string a) (int_of_string b) + with Failure _ -> + error "String (\"%s\" or \"%s\") can't be 
converted into an integer as index of IP" a b; + raise (Failure "Failed to compare") + in + let find_all_vif_ips vif_path vif_id = + (* vif_path: attr/vif/0 *) + (* vif_id: 0 *) + let extract_ip_ver vif_id acc ip_ver = + let ip_addr_ids = list (extend vif_path ip_ver) in + let extract_ip_addr vif_id ip_ver acc ip_addr_id = + let key_left = Printf.sprintf "%s/%s/%s" vif_path ip_ver ip_addr_id in + let key_right = Printf.sprintf "%s/%s/%s" vif_id ip_ver ip_addr_id in + match acc with + | [] when ip_ver = "ipv4" -> + [(key_left, (extend vif_id "ip")); (key_left, key_right)] + | _ -> (key_left, key_right) :: acc + in + try + (List.fold_left (extract_ip_addr vif_id ip_ver) [] (List.stable_sort cmp ip_addr_ids)) @ acc + with Failure _ -> + error "Failed to extract IP address for vif %s." vif_id; + [] + in + let ip_vers = List.filter (fun a -> a = "ipv4" || a = "ipv6") (list vif_path) in + List.fold_left (extract_ip_ver vif_id) [] ip_vers + in + match find_vifs (extend path "vif") with + | [] -> + path + |> find_eths + |> List.map (fun (path, prefix) -> find_all_ips path prefix) + |> List.concat + | vif_pair_list -> + vif_pair_list + |> List.map (fun (vif_path, vif_id) -> find_all_vif_ips vif_path vif_id) + |> List.concat (* One key is placed in the other map per control/* key in xenstore. 
This catches keys like "feature-shutdown" "feature-hibernate" "feature-reboot" From 0ed23b5dd99df24f4c2212d9237a30caee8a448f Mon Sep 17 00:00:00 2001 From: Deli Zhang Date: Thu, 25 May 2017 14:17:14 +0800 Subject: [PATCH 51/59] CA-247321: Refactor Db_gc, move gc functions to Db_gc_util Signed-off-by: Deli Zhang --- ocaml/xapi/db_gc.ml | 391 +------------------------------- ocaml/xapi/db_gc_util.ml | 413 ++++++++++++++++++++++++++++++++++ ocaml/xapi/test_gpu_group.ml | 2 +- ocaml/xapi/test_pvs_proxy.ml | 8 +- ocaml/xapi/test_pvs_server.ml | 4 +- 5 files changed, 421 insertions(+), 397 deletions(-) create mode 100644 ocaml/xapi/db_gc_util.ml diff --git a/ocaml/xapi/db_gc.ml b/ocaml/xapi/db_gc.ml index c47b95e3c47..3954658b646 100644 --- a/ocaml/xapi/db_gc.ml +++ b/ocaml/xapi/db_gc.ml @@ -37,187 +37,6 @@ let host_table_m = Mutex.create () let _time = "time" let _shutting_down = "shutting-down" -let valid_ref x = Db.is_valid_ref x - -let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_record = - let db = Context.database_of __context in - let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in - let all_refs = get_all ~__context in - let do_gc ref = - let print_valid b = if b then "valid" else "INVALID" in - let record = get_record ~__context ~self:ref in - let ref_1_valid = valid_ref1 record in - let ref_2_valid = valid_ref2 record in - - if not (ref_1_valid && ref_2_valid) then - begin - let table,reference,valid1,valid2 = - (match DB.get_table_from_ref db (Ref.string_of ref) with - None -> "UNKNOWN CLASS" - | Some c -> c), - (Ref.string_of ref), - (print_valid ref_1_valid), - (print_valid ref_2_valid) in - debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting GC..." 
table reference valid1 valid2; - delete_record ~__context ~self:ref - end in - List.iter do_gc all_refs - -let gc_VGPU_types ~__context = - (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types - * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) - let open Db_filter_types in - let garbage = Db.VGPU_type.get_records_where ~__context - ~expr:(And ((Eq (Field "VGPUs", Literal "()")), - (Eq (Field "supported_on_PGPUs", Literal "()")))) in - match garbage with - | [] -> () - | _ -> - debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" - (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); - List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage - -let gc_PVS_proxies ~__context = - gc_connector ~__context - Db.PVS_proxy.get_all - Db.PVS_proxy.get_record - (fun x -> valid_ref __context x.pVS_proxy_VIF) - (fun x -> valid_ref __context x.pVS_proxy_site) - Db.PVS_proxy.destroy - -(* A PVS server refers to a PVS site. We delete it, if the reference - * becomes invalid. At creation, the server is connected to a site and - * hence we never GC a server right after it was created. *) -let gc_PVS_servers ~__context = - gc_connector ~__context - Db.PVS_server.get_all - Db.PVS_server.get_record - (fun x -> true) - (fun x -> valid_ref __context x.pVS_server_site) - Db.PVS_server.destroy - -let gc_PVS_cache_storage ~__context = - gc_connector ~__context - Db.PVS_cache_storage.get_all - Db.PVS_cache_storage.get_record - (fun x -> valid_ref __context x.pVS_cache_storage_site) - (fun x -> valid_ref __context x.pVS_cache_storage_host) - Db.PVS_cache_storage.destroy - -let gc_PIFs ~__context = - gc_connector ~__context Db.PIF.get_all Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) - (fun ~__context ~self -> - (* We need to destroy the PIF, it's metrics and any VLAN/bond records that this PIF was a master of. 
*) - (* bonds/tunnels_to_gc is actually a list which is either empty (not part of a bond/tunnel) - * or containing exactly one reference.. *) - let bonds_to_gc = Db.PIF.get_bond_master_of ~__context ~self in - let vlan_to_gc = Db.PIF.get_VLAN_master_of ~__context ~self in - let tunnels_to_gc = Db.PIF.get_tunnel_access_PIF_of ~__context ~self in - (* Only destroy PIF_metrics of physical or bond PIFs *) - if vlan_to_gc = Ref.null && tunnels_to_gc = [] then begin - let metrics = Db.PIF.get_metrics ~__context ~self in - (try Db.PIF_metrics.destroy ~__context ~self:metrics with _ -> ()) - end; - (try Db.VLAN.destroy ~__context ~self:vlan_to_gc with _ -> ()); - List.iter (fun tunnel -> (try Db.Tunnel.destroy ~__context ~self:tunnel with _ -> ())) tunnels_to_gc; - List.iter (fun bond -> (try Db.Bond.destroy ~__context ~self:bond with _ -> ())) bonds_to_gc; - Db.PIF.destroy ~__context ~self) -let gc_VBDs ~__context = - gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) - (fun ~__context ~self -> - (* When GCing VBDs that are CDs, set them to empty rather than destroy them entirely *) - if (valid_ref __context (Db.VBD.get_VM ~__context ~self)) && (Db.VBD.get_type ~__context ~self = `CD) then - begin - Db.VBD.set_VDI ~__context ~self ~value:Ref.null; - Db.VBD.set_empty ~__context ~self ~value:true; - debug "VBD corresponds to CD. 
Record preserved but set to empty"; - end - else - begin - let metrics = Db.VBD.get_metrics ~__context ~self in - (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); - Db.VBD.destroy ~__context ~self; - end) - -let gc_crashdumps ~__context = - gc_connector ~__context Db.Crashdump.get_all Db.Crashdump.get_record - (fun x->valid_ref __context x.crashdump_VM) (fun x->valid_ref __context x.crashdump_VDI) Db.Crashdump.destroy -let gc_VIFs ~__context = - gc_connector ~__context Db.VIF.get_all Db.VIF.get_record (fun x->valid_ref __context x.vIF_VM) (fun x->valid_ref __context x.vIF_network) - (fun ~__context ~self -> - let metrics = Db.VIF.get_metrics ~__context ~self in - (try Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); - Db.VIF.destroy ~__context ~self) -let gc_VGPUs ~__context = - gc_connector ~__context Db.VGPU.get_all Db.VGPU.get_record (fun x->valid_ref __context x.vGPU_VM) (fun x->valid_ref __context x.vGPU_GPU_group) - (fun ~__context ~self -> - Db.VGPU.destroy ~__context ~self) - -let gc_PGPUs ~__context = - let pgpus = Db.PGPU.get_all ~__context in - (* Go through the list of PGPUs, destroying any with an invalid host ref. - * Keep a list of groups which contained PGPUs which were destroyed. *) - let affected_groups = - List.fold_left - (fun acc pgpu -> - if not (valid_ref __context (Db.PGPU.get_host ~__context ~self:pgpu)) - then begin - let group = Db.PGPU.get_GPU_group ~__context ~self:pgpu in - Db.PGPU.destroy ~__context ~self:pgpu; - debug "GCed PGPU %s" (Ref.string_of pgpu); - group :: acc - end else - acc) - [] pgpus - |> List.filter (valid_ref __context) - |> List.setify - in - (* Update enabled/supported VGPU types on the groups which contained the - * destroyed PGPUs. 
*) - List.iter - (fun group -> - Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group; - Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) - affected_groups - -let gc_PBDs ~__context = - gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy -let gc_Host_patches ~__context = - gc_connector ~__context Db.Host_patch.get_all Db.Host_patch.get_record (fun x->valid_ref __context x.host_patch_host) (fun x->valid_ref __context x.host_patch_pool_patch) Db.Host_patch.destroy -let gc_host_cpus ~__context = - let host_cpus = Db.Host_cpu.get_all ~__context in - List.iter - (fun hcpu -> - if not (valid_ref __context (Db.Host_cpu.get_host ~__context ~self:hcpu)) then - Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus -let gc_host_metrics ~__context = - let all_host_metrics = Db.Host_metrics.get_all ~__context in - let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in - let host_metrics = metrics (Db.Host.get_all ~__context) in - List.iter - (fun hmetric-> - if not (List.mem hmetric host_metrics) then - Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics - -(* If the SR record is missing, delete the VDI record *) -let gc_VDIs ~__context = - let all_srs = Db.SR.get_all ~__context in - List.iter (fun vdi -> - let sr = Db.VDI.get_SR ~__context ~self:vdi in - if not(List.mem sr all_srs) then begin - debug "GCed VDI %s" (Ref.string_of vdi); - Db.VDI.destroy ~__context ~self:vdi - end) (Db.VDI.get_all ~__context) - -let gc_consoles ~__context = - List.iter (fun console -> - if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) - then begin - Db.Console.destroy ~__context ~self:console; - debug "GCed console %s" (Ref.string_of console); - end - ) (Db.Console.get_all ~__context) - let already_sent_clock_skew_warnings = Hashtbl.create 10 let detect_clock_skew ~__context host skew = @@ -297,186 +116,6 @@ 
let check_host_liveness ~__context = let all_hosts = Db.Host.get_all ~__context in List.iter check_host all_hosts -let timeout_sessions_common ~__context sessions limit session_group = - let unused_sessions = List.filter - (fun (x, _) -> - let rec is_session_unused s = - if (s=Ref.null) then true (* top of session tree *) - else - try (* if no session s, assume default value true=unused *) - let tasks = (Db.Session.get_tasks ~__context ~self:s) in - let parent = (Db.Session.get_parent ~__context ~self:s) in - (List.for_all - (fun t -> TaskHelper.status_is_completed - (* task might not exist anymore, assume completed in this case *) - (try Db.Task.get_status ~__context ~self:t with _->`success) - ) - tasks - ) - && (is_session_unused parent) - with _->true - in is_session_unused x - ) - sessions - in - (* Only keep a list of (ref, last_active, uuid) *) - let disposable_sessions = List.map (fun (x, y) -> x, Date.to_float y.Db_actions.session_last_active, y.Db_actions.session_uuid) unused_sessions in - (* Definitely invalidate sessions last used long ago *) - let threshold_time = Unix.time () -. 
!Xapi_globs.inactive_session_timeout in - let young, old = List.partition (fun (_, y, _) -> y > threshold_time) disposable_sessions in - (* If there are too many young sessions then we need to delete the oldest *) - let lucky, unlucky = - if List.length young <= limit - then young, [] (* keep them all *) - else - (* Need to reverse sort by last active and drop the oldest *) - List.chop limit (List.sort (fun (_,a, _) (_,b, _) -> compare b a) young) in - let cancel doc sessions = - List.iter - (fun (s, active, uuid) -> - debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) (Date.to_string (Date.of_float active)) doc; - Xapi_session.destroy_db_session ~__context ~self:s - ) sessions in - (* Only the 'lucky' survive: the 'old' and 'unlucky' are destroyed *) - if unlucky <> [] - then debug "Number of disposable sessions in group '%s' in database (%d/%d) exceeds limit (%d): will delete the oldest" session_group (List.length disposable_sessions) (List.length sessions) limit; - cancel (Printf.sprintf "Timed out session in group '%s' because of its age" session_group) old; - cancel (Printf.sprintf "Timed out session in group '%s' because max number of sessions was exceeded" session_group) unlucky - -let last_session_log_time = ref None - -let timeout_sessions ~__context = - let all_sessions = Db.Session.get_internal_records_where ~__context ~expr:Db_filter_types.True in - - let pool_sessions, nonpool_sessions = List.partition (fun (_, s) -> s.Db_actions.session_pool) all_sessions in - let use_root_auth_name s = s.Db_actions.session_auth_user_name = "" || s.Db_actions.session_auth_user_name = "root" in - let anon_sessions, named_sessions = List.partition (fun (_, s) -> s.Db_actions.session_originator = "" && use_root_auth_name s) nonpool_sessions in - let session_groups = Hashtbl.create 37 in - List.iter (function (_, s) as rs -> - let key = if use_root_auth_name s then `Orig 
s.Db_actions.session_originator else `Name s.Db_actions.session_auth_user_name in - let current_sessions = - try Hashtbl.find session_groups key - with Not_found -> [] in - Hashtbl.replace session_groups key (rs :: current_sessions) - ) named_sessions; - - let should_log = match !last_session_log_time with - | None -> true - | Some t -> Unix.time () -. t > 600.0 (* Every 10 mins, dump session stats *) - in - - if should_log then begin - last_session_log_time := Some (Unix.time ()); - let nbindings = Hashtbl.fold (fun _ _ acc -> 1+acc) session_groups 0 in - debug "session_log: active_sessions=%d (%d pool, %d anon, %d named - %d groups)" - (List.length all_sessions) (List.length pool_sessions) (List.length anon_sessions) (List.length named_sessions) nbindings - end; - - begin - Hashtbl.iter - (fun key ss -> match key with - | `Orig orig -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_originator ("originator:"^orig) - | `Name name -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_user_name ("username:"^name)) - session_groups; - timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; - timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; - end - -let probation_pending_tasks = Hashtbl.create 53 - -let timeout_tasks ~__context = - let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in - let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in - let oldest_pending_time = Unix.time() -. !Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in - - let completed, pending = - List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) - all_tasks in - - (* Any task that was incomplete at the point someone called Task.destroy - will have `destroy in its current_operations. 
If they're now complete, - we can Kill these immediately *) - let completed_destroyable, completed_gcable = - List.partition - (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) - completed in - - List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; - - let completed_old, completed_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_finished < oldest_completed_time) - completed_gcable in - - let pending_old, pending_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_created < oldest_pending_time) - pending in - - let pending_old_run, pending_old_hung = - List.partition - (fun (_, t) -> - try - let pre_progress = - Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in - t.Db_actions.task_progress -. pre_progress > min_float - with Not_found -> true) - pending_old in - - let () = - Hashtbl.clear probation_pending_tasks; - List.iter - (fun (_, t) -> - Hashtbl.add probation_pending_tasks - t.Db_actions.task_uuid t.Db_actions.task_progress) - pending_old in - - let old = pending_old_hung @ completed_old in - let young = pending_old_run @ pending_young @ completed_young in - - (* If there are still too many young tasks then we'll try to delete some completed ones *) - let lucky, unlucky = - if List.length young <= Xapi_globs.max_tasks - then young, [] (* keep them all *) - else - (* Compute how many we'd like to delete *) - let overflow = List.length young - Xapi_globs.max_tasks in - (* We only consider deleting completed tasks *) - let completed, pending = List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in - (* Sort the completed tasks so we delete oldest tasks in preference *) - let completed = - List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in - (* From the completes set, choose up to 'overflow' *) - 
let unlucky, lucky = - if List.length completed > overflow - then List.chop overflow completed - else completed, [] in (* not enough to delete, oh well *) - (* Keep all pending and any which were not chosen from the completed set *) - pending @ lucky, unlucky in - (* Cancel the 'old' and 'unlucky' *) - List.iter (fun (x, y) -> - if not (TaskHelper.status_is_completed y.Db_actions.task_status) - then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; - TaskHelper.destroy ~__context x - ) (old @ unlucky); - if List.length lucky > Xapi_globs.max_tasks - then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks - -(* -let timeout_alerts ~__context = - let all_alerts = Db.Alert.get_all ~__context in - let now = Unix.gettimeofday() in - List.iter (fun alert -> - let alert_time = Date.to_float (Db.Alert.get_timestamp ~__context ~self:alert) in - if now -. alert_time > Xapi_globs.alert_timeout then - Db.Alert.destroy ~__context ~self:alert - ) all_alerts -*) - (* Compare this host's (the master's) version with that reported by all other hosts and mark the Pool with an other_config key if we are in a rolling upgrade mode. If we detect the beginning or end of a rolling upgrade, call out to an external script. 
*) @@ -559,9 +198,6 @@ let tickle_heartbeat ~__context host stuff = ); [] -let gc_messages ~__context = - Xapi_message.gc ~__context - let single_pass () = Server_helpers.exec_with_new_task "DB GC" (fun __context -> @@ -571,32 +207,7 @@ let single_pass () = Stats.time_this (Printf.sprintf "Db_gc: %s" name) (fun () -> f ~__context) in - (* do VDIs first because this will *) - (* cause some VBDs to be affected *) - List.iter time_one [ - "VDIs", gc_VDIs; - "PIFs", gc_PIFs; - "VBDs", gc_VBDs; - "crashdumps", gc_crashdumps; - "VIFs", gc_VIFs; - "PBDs", gc_PBDs; - "VGPUs", gc_VGPUs; - "PGPUs", gc_PGPUs; - "VGPU_types", gc_VGPU_types; - "Host patches", gc_Host_patches; - "Host CPUs", gc_host_cpus; - "Host metrics", gc_host_metrics; - "Tasks", timeout_tasks; - "Sessions", timeout_sessions; - "Messages", gc_messages; - "Consoles", gc_consoles; - "PVS proxies", gc_PVS_proxies; - "PVS servers", gc_PVS_servers; - "PVS cache storage", gc_PVS_cache_storage; - (* timeout_alerts; *) - (* CA-29253: wake up all blocked clients *) - "Heartbeat", Xapi_event.heartbeat; - ] + List.iter time_one Db_gc_util.gc_subtask_list ); Mutex.execute use_host_heartbeat_for_liveness_m (fun () -> diff --git a/ocaml/xapi/db_gc_util.ml b/ocaml/xapi/db_gc_util.ml new file mode 100644 index 00000000000..5d73c22f2e3 --- /dev/null +++ b/ocaml/xapi/db_gc_util.ml @@ -0,0 +1,413 @@ +(* + * Copyright (C) 2006-2017 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. 
+ *) +(** + * @group Database Operations +*) + +open API +open Stdext +open Listext + +module D=Debug.Make(struct let name="db_gc_util" end) +open D + +let valid_ref x = Db.is_valid_ref x + +let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_record = + let db = Context.database_of __context in + let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in + let all_refs = get_all ~__context in + let do_gc ref = + let print_valid b = if b then "valid" else "INVALID" in + let record = get_record ~__context ~self:ref in + let ref_1_valid = valid_ref1 record in + let ref_2_valid = valid_ref2 record in + + if not (ref_1_valid && ref_2_valid) then + begin + let table,reference,valid1,valid2 = + (match DB.get_table_from_ref db (Ref.string_of ref) with + None -> "UNKNOWN CLASS" + | Some c -> c), + (Ref.string_of ref), + (print_valid ref_1_valid), + (print_valid ref_2_valid) in + debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting GC..." table reference valid1 valid2; + delete_record ~__context ~self:ref + end in + List.iter do_gc all_refs + +let gc_VGPU_types ~__context = + (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types + * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) + let open Db_filter_types in + let garbage = Db.VGPU_type.get_records_where ~__context + ~expr:(And ((Eq (Field "VGPUs", Literal "()")), + (Eq (Field "supported_on_PGPUs", Literal "()")))) in + match garbage with + | [] -> () + | _ -> + debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" + (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); + List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage + +let gc_PVS_proxies ~__context = + gc_connector ~__context + Db.PVS_proxy.get_all + Db.PVS_proxy.get_record + (fun x -> valid_ref __context x.pVS_proxy_VIF) + (fun x -> valid_ref __context x.pVS_proxy_site) + Db.PVS_proxy.destroy + +(* A PVS server 
refers to a PVS site. We delete it, if the reference + * becomes invalid. At creation, the server is connected to a site and + * hence we never GC a server right after it was created. *) +let gc_PVS_servers ~__context = + gc_connector ~__context + Db.PVS_server.get_all + Db.PVS_server.get_record + (fun x -> true) + (fun x -> valid_ref __context x.pVS_server_site) + Db.PVS_server.destroy + +let gc_PVS_cache_storage ~__context = + gc_connector ~__context + Db.PVS_cache_storage.get_all + Db.PVS_cache_storage.get_record + (fun x -> valid_ref __context x.pVS_cache_storage_site) + (fun x -> valid_ref __context x.pVS_cache_storage_host) + Db.PVS_cache_storage.destroy + +let gc_PIFs ~__context = + gc_connector ~__context Db.PIF.get_all Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) + (fun ~__context ~self -> + (* We need to destroy the PIF, it's metrics and any VLAN/bond records that this PIF was a master of. *) + (* bonds/tunnels_to_gc is actually a list which is either empty (not part of a bond/tunnel) + * or containing exactly one reference.. 
*) + let bonds_to_gc = Db.PIF.get_bond_master_of ~__context ~self in + let vlan_to_gc = Db.PIF.get_VLAN_master_of ~__context ~self in + let tunnels_to_gc = Db.PIF.get_tunnel_access_PIF_of ~__context ~self in + (* Only destroy PIF_metrics of physical or bond PIFs *) + if vlan_to_gc = Ref.null && tunnels_to_gc = [] then begin + let metrics = Db.PIF.get_metrics ~__context ~self in + (try Db.PIF_metrics.destroy ~__context ~self:metrics with _ -> ()) + end; + (try Db.VLAN.destroy ~__context ~self:vlan_to_gc with _ -> ()); + List.iter (fun tunnel -> (try Db.Tunnel.destroy ~__context ~self:tunnel with _ -> ())) tunnels_to_gc; + List.iter (fun bond -> (try Db.Bond.destroy ~__context ~self:bond with _ -> ())) bonds_to_gc; + Db.PIF.destroy ~__context ~self) +let gc_VBDs ~__context = + gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) + (fun ~__context ~self -> + (* When GCing VBDs that are CDs, set them to empty rather than destroy them entirely *) + if (valid_ref __context (Db.VBD.get_VM ~__context ~self)) && (Db.VBD.get_type ~__context ~self = `CD) then + begin + Db.VBD.set_VDI ~__context ~self ~value:Ref.null; + Db.VBD.set_empty ~__context ~self ~value:true; + debug "VBD corresponds to CD. 
Record preserved but set to empty"; + end + else + begin + let metrics = Db.VBD.get_metrics ~__context ~self in + (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); + Db.VBD.destroy ~__context ~self; + end) + +let gc_crashdumps ~__context = + gc_connector ~__context Db.Crashdump.get_all Db.Crashdump.get_record + (fun x->valid_ref __context x.crashdump_VM) (fun x->valid_ref __context x.crashdump_VDI) Db.Crashdump.destroy +let gc_VIFs ~__context = + gc_connector ~__context Db.VIF.get_all Db.VIF.get_record (fun x->valid_ref __context x.vIF_VM) (fun x->valid_ref __context x.vIF_network) + (fun ~__context ~self -> + let metrics = Db.VIF.get_metrics ~__context ~self in + (try Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); + Db.VIF.destroy ~__context ~self) +let gc_VGPUs ~__context = + gc_connector ~__context Db.VGPU.get_all Db.VGPU.get_record (fun x->valid_ref __context x.vGPU_VM) (fun x->valid_ref __context x.vGPU_GPU_group) + (fun ~__context ~self -> + Db.VGPU.destroy ~__context ~self) + +let gc_PGPUs ~__context = + let pgpus = Db.PGPU.get_all ~__context in + (* Go through the list of PGPUs, destroying any with an invalid host ref. + * Keep a list of groups which contained PGPUs which were destroyed. *) + let affected_groups = + List.fold_left + (fun acc pgpu -> + if not (valid_ref __context (Db.PGPU.get_host ~__context ~self:pgpu)) + then begin + let group = Db.PGPU.get_GPU_group ~__context ~self:pgpu in + Db.PGPU.destroy ~__context ~self:pgpu; + debug "GCed PGPU %s" (Ref.string_of pgpu); + group :: acc + end else + acc) + [] pgpus + |> List.filter (valid_ref __context) + |> List.setify + in + (* Update enabled/supported VGPU types on the groups which contained the + * destroyed PGPUs. 
*) + List.iter + (fun group -> + Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group; + Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) + affected_groups + +let gc_PBDs ~__context = + gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy +let gc_Host_patches ~__context = + gc_connector ~__context Db.Host_patch.get_all Db.Host_patch.get_record (fun x->valid_ref __context x.host_patch_host) (fun x->valid_ref __context x.host_patch_pool_patch) Db.Host_patch.destroy +let gc_host_cpus ~__context = + let host_cpus = Db.Host_cpu.get_all ~__context in + List.iter + (fun hcpu -> + if not (valid_ref __context (Db.Host_cpu.get_host ~__context ~self:hcpu)) then + Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus +let gc_host_metrics ~__context = + let all_host_metrics = Db.Host_metrics.get_all ~__context in + let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in + let host_metrics = metrics (Db.Host.get_all ~__context) in + List.iter + (fun hmetric-> + if not (List.mem hmetric host_metrics) then + Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics + +(* If the SR record is missing, delete the VDI record *) +let gc_VDIs ~__context = + let all_srs = Db.SR.get_all ~__context in + List.iter (fun vdi -> + let sr = Db.VDI.get_SR ~__context ~self:vdi in + if not(List.mem sr all_srs) then begin + debug "GCed VDI %s" (Ref.string_of vdi); + Db.VDI.destroy ~__context ~self:vdi + end) (Db.VDI.get_all ~__context) + +let gc_consoles ~__context = + List.iter (fun console -> + if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) + then begin + Db.Console.destroy ~__context ~self:console; + debug "GCed console %s" (Ref.string_of console); + end + ) (Db.Console.get_all ~__context) + +let timeout_sessions_common ~__context sessions limit session_group = + let unused_sessions = List.filter + (fun (x, _) -> 
+ let rec is_session_unused s = + if (s=Ref.null) then true (* top of session tree *) + else + try (* if no session s, assume default value true=unused *) + let tasks = (Db.Session.get_tasks ~__context ~self:s) in + let parent = (Db.Session.get_parent ~__context ~self:s) in + (List.for_all + (fun t -> TaskHelper.status_is_completed + (* task might not exist anymore, assume completed in this case *) + (try Db.Task.get_status ~__context ~self:t with _->`success) + ) + tasks + ) + && (is_session_unused parent) + with _->true + in is_session_unused x + ) + sessions + in + (* Only keep a list of (ref, last_active, uuid) *) + let disposable_sessions = List.map (fun (x, y) -> x, Date.to_float y.Db_actions.session_last_active, y.Db_actions.session_uuid) unused_sessions in + (* Definitely invalidate sessions last used long ago *) + let threshold_time = Unix.time () -. !Xapi_globs.inactive_session_timeout in + let young, old = List.partition (fun (_, y, _) -> y > threshold_time) disposable_sessions in + (* If there are too many young sessions then we need to delete the oldest *) + let lucky, unlucky = + if List.length young <= limit + then young, [] (* keep them all *) + else + (* Need to reverse sort by last active and drop the oldest *) + List.chop limit (List.sort (fun (_,a, _) (_,b, _) -> compare b a) young) in + let cancel doc sessions = + List.iter + (fun (s, active, uuid) -> + debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) (Date.to_string (Date.of_float active)) doc; + Xapi_session.destroy_db_session ~__context ~self:s + ) sessions in + (* Only the 'lucky' survive: the 'old' and 'unlucky' are destroyed *) + if unlucky <> [] + then debug "Number of disposable sessions in group '%s' in database (%d/%d) exceeds limit (%d): will delete the oldest" session_group (List.length disposable_sessions) (List.length sessions) limit; + cancel (Printf.sprintf "Timed out session in group '%s' because of its 
age" session_group) old; + cancel (Printf.sprintf "Timed out session in group '%s' because max number of sessions was exceeded" session_group) unlucky + +let last_session_log_time = ref None + +let timeout_sessions ~__context = + let all_sessions = Db.Session.get_internal_records_where ~__context ~expr:Db_filter_types.True in + + let pool_sessions, nonpool_sessions = List.partition (fun (_, s) -> s.Db_actions.session_pool) all_sessions in + let use_root_auth_name s = s.Db_actions.session_auth_user_name = "" || s.Db_actions.session_auth_user_name = "root" in + let anon_sessions, named_sessions = List.partition (fun (_, s) -> s.Db_actions.session_originator = "" && use_root_auth_name s) nonpool_sessions in + let session_groups = Hashtbl.create 37 in + List.iter (function (_, s) as rs -> + let key = if use_root_auth_name s then `Orig s.Db_actions.session_originator else `Name s.Db_actions.session_auth_user_name in + let current_sessions = + try Hashtbl.find session_groups key + with Not_found -> [] in + Hashtbl.replace session_groups key (rs :: current_sessions) + ) named_sessions; + + let should_log = match !last_session_log_time with + | None -> true + | Some t -> Unix.time () -. 
t > 600.0 (* Every 10 mins, dump session stats *) + in + + if should_log then begin + last_session_log_time := Some (Unix.time ()); + let nbindings = Hashtbl.fold (fun _ _ acc -> 1+acc) session_groups 0 in + debug "session_log: active_sessions=%d (%d pool, %d anon, %d named - %d groups)" + (List.length all_sessions) (List.length pool_sessions) (List.length anon_sessions) (List.length named_sessions) nbindings + end; + + begin + Hashtbl.iter + (fun key ss -> match key with + | `Orig orig -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_originator ("originator:"^orig) + | `Name name -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_user_name ("username:"^name)) + session_groups; + timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; + timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; + end + +let probation_pending_tasks = Hashtbl.create 53 + +let timeout_tasks ~__context = + let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in + let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in + let oldest_pending_time = Unix.time() -. !Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in + + let completed, pending = + List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) + all_tasks in + + (* Any task that was incomplete at the point someone called Task.destroy + will have `destroy in its current_operations. 
If they're now complete, + we can Kill these immediately *) + let completed_destroyable, completed_gcable = + List.partition + (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) + completed in + + List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; + + let completed_old, completed_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_finished < oldest_completed_time) + completed_gcable in + + let pending_old, pending_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_created < oldest_pending_time) + pending in + + let pending_old_run, pending_old_hung = + List.partition + (fun (_, t) -> + try + let pre_progress = + Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in + t.Db_actions.task_progress -. pre_progress > min_float + with Not_found -> true) + pending_old in + + let () = + Hashtbl.clear probation_pending_tasks; + List.iter + (fun (_, t) -> + Hashtbl.add probation_pending_tasks + t.Db_actions.task_uuid t.Db_actions.task_progress) + pending_old in + + let old = pending_old_hung @ completed_old in + let young = pending_old_run @ pending_young @ completed_young in + + (* If there are still too many young tasks then we'll try to delete some completed ones *) + let lucky, unlucky = + if List.length young <= Xapi_globs.max_tasks + then young, [] (* keep them all *) + else + (* Compute how many we'd like to delete *) + let overflow = List.length young - Xapi_globs.max_tasks in + (* We only consider deleting completed tasks *) + let completed, pending = List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in + (* Sort the completed tasks so we delete oldest tasks in preference *) + let completed = + List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in + (* From the completes set, choose up to 'overflow' *) + 
let unlucky, lucky = + if List.length completed > overflow + then List.chop overflow completed + else completed, [] in (* not enough to delete, oh well *) + (* Keep all pending and any which were not chosen from the completed set *) + pending @ lucky, unlucky in + (* Cancel the 'old' and 'unlucky' *) + List.iter (fun (x, y) -> + if not (TaskHelper.status_is_completed y.Db_actions.task_status) + then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; + TaskHelper.destroy ~__context x + ) (old @ unlucky); + if List.length lucky > Xapi_globs.max_tasks + then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks + +(* +let timeout_alerts ~__context = + let all_alerts = Db.Alert.get_all ~__context in + let now = Unix.gettimeofday() in + List.iter (fun alert -> + let alert_time = Date.to_float (Db.Alert.get_timestamp ~__context ~self:alert) in + if now -. alert_time > Xapi_globs.alert_timeout then + Db.Alert.destroy ~__context ~self:alert + ) all_alerts +*) + +let gc_messages ~__context = + Xapi_message.gc ~__context + +(* do VDIs first because this will cause some VBDs to be affected *) +let gc_subtask_list = [ + "VDIs", gc_VDIs; + "PIFs", gc_PIFs; + "VBDs", gc_VBDs; + "crashdumps", gc_crashdumps; + "VIFs", gc_VIFs; + "PBDs", gc_PBDs; + "VGPUs", gc_VGPUs; + "PGPUs", gc_PGPUs; + "VGPU_types", gc_VGPU_types; + "Host patches", gc_Host_patches; + "Host CPUs", gc_host_cpus; + "Host metrics", gc_host_metrics; + "Tasks", timeout_tasks; + "Sessions", timeout_sessions; + "Messages", gc_messages; + "Consoles", gc_consoles; + "PVS proxies", gc_PVS_proxies; + "PVS servers", gc_PVS_servers; + "PVS cache storage", gc_PVS_cache_storage; + (* timeout_alerts; *) + (* CA-29253: wake up all blocked clients *) + "Heartbeat", Xapi_event.heartbeat; + ] diff --git a/ocaml/xapi/test_gpu_group.ml b/ocaml/xapi/test_gpu_group.ml index ad47471837a..605ff23abc9 100644 --- a/ocaml/xapi/test_gpu_group.ml +++ 
b/ocaml/xapi/test_gpu_group.ml @@ -55,7 +55,7 @@ let test_supported_enabled_types () = (* Invalidate the PGPU's host ref, and run a GC pass; this should destroy the * pgpu, and clear the group's supported and enabled types. *) Db.PGPU.set_host ~__context ~self:pgpu ~value:Ref.null; - Db_gc.gc_PGPUs ~__context; + Db_gc_util.gc_PGPUs ~__context; let group_supported_types = Db.GPU_group.get_supported_VGPU_types ~__context ~self:gPU_group in diff --git a/ocaml/xapi/test_pvs_proxy.ml b/ocaml/xapi/test_pvs_proxy.ml index 0040337753b..0c10158d0df 100644 --- a/ocaml/xapi/test_pvs_proxy.ml +++ b/ocaml/xapi/test_pvs_proxy.ml @@ -72,18 +72,18 @@ let test_gc_proxy () = let site = make_pvs_site ~__context () in let vIF = make_vif ~__context ~device:"0" () in let proxy = Xapi_pvs_proxy.create ~__context ~site ~vIF in - ( Db_gc.gc_PVS_proxies ~__context + ( Db_gc_util.gc_PVS_proxies ~__context ; assert_equal (Db.PVS_proxy.get_site ~__context ~self:proxy) site ; assert_equal (Db.PVS_proxy.get_VIF ~__context ~self:proxy) vIF ; Db.PVS_proxy.set_site ~__context ~self:proxy ~value:Ref.null - ; Db_gc.gc_PVS_proxies ~__context (* should collect the proxy *) + ; Db_gc_util.gc_PVS_proxies ~__context (* should collect the proxy *) ; assert_equal false (Db.is_valid_ref __context proxy)); let proxy = Xapi_pvs_proxy.create ~__context ~site ~vIF in - ( Db_gc.gc_PVS_proxies ~__context + ( Db_gc_util.gc_PVS_proxies ~__context ; assert_equal (Db.PVS_proxy.get_site ~__context ~self:proxy) site ; assert_equal (Db.PVS_proxy.get_VIF ~__context ~self:proxy) vIF ; Db.PVS_proxy.set_VIF ~__context ~self:proxy ~value:Ref.null - ; Db_gc.gc_PVS_proxies ~__context (* should collect the proxy *) + ; Db_gc_util.gc_PVS_proxies ~__context (* should collect the proxy *) ; assert_equal false (Db.is_valid_ref __context proxy)) let test = diff --git a/ocaml/xapi/test_pvs_server.ml b/ocaml/xapi/test_pvs_server.ml index 11e967fb217..4b927a5fa20 100644 --- a/ocaml/xapi/test_pvs_server.ml +++ 
b/ocaml/xapi/test_pvs_server.ml @@ -150,10 +150,10 @@ let test_gc () = let server = Xapi_pvs_server.introduce ~__context ~addresses ~first_port ~last_port ~site in - ( Db_gc.gc_PVS_servers ~__context + ( Db_gc_util.gc_PVS_servers ~__context ; assert_equal (Db.PVS_server.get_site ~__context ~self:server) site ; Db.PVS_server.set_site ~__context ~self:server ~value:Ref.null - ; Db_gc.gc_PVS_servers ~__context (* should collect the server *) + ; Db_gc_util.gc_PVS_servers ~__context (* should collect the server *) ; assert_equal false (Db.is_valid_ref __context server) ) From 71cb6b67aba6d760b6ceb26d11a32d00210521bc Mon Sep 17 00:00:00 2001 From: Deli Zhang Date: Thu, 25 May 2017 14:29:09 +0800 Subject: [PATCH 52/59] CA-247321: Refactor Db_gc_util, change function definition order following appearance in gc_subtask_list Signed-off-by: Deli Zhang --- ocaml/xapi/db_gc_util.ml | 304 ++++++++++++++++++++------------------- 1 file changed, 156 insertions(+), 148 deletions(-) diff --git a/ocaml/xapi/db_gc_util.ml b/ocaml/xapi/db_gc_util.ml index 5d73c22f2e3..6d99e8c7356 100644 --- a/ocaml/xapi/db_gc_util.ml +++ b/ocaml/xapi/db_gc_util.ml @@ -43,51 +43,20 @@ let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_reco (Ref.string_of ref), (print_valid ref_1_valid), (print_valid ref_2_valid) in - debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting GC..." table reference valid1 valid2; + debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting to GC..." 
table reference valid1 valid2; delete_record ~__context ~self:ref end in List.iter do_gc all_refs -let gc_VGPU_types ~__context = - (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types - * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) - let open Db_filter_types in - let garbage = Db.VGPU_type.get_records_where ~__context - ~expr:(And ((Eq (Field "VGPUs", Literal "()")), - (Eq (Field "supported_on_PGPUs", Literal "()")))) in - match garbage with - | [] -> () - | _ -> - debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" - (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); - List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage - -let gc_PVS_proxies ~__context = - gc_connector ~__context - Db.PVS_proxy.get_all - Db.PVS_proxy.get_record - (fun x -> valid_ref __context x.pVS_proxy_VIF) - (fun x -> valid_ref __context x.pVS_proxy_site) - Db.PVS_proxy.destroy - -(* A PVS server refers to a PVS site. We delete it, if the reference - * becomes invalid. At creation, the server is connected to a site and - * hence we never GC a server right after it was created. 
*) -let gc_PVS_servers ~__context = - gc_connector ~__context - Db.PVS_server.get_all - Db.PVS_server.get_record - (fun x -> true) - (fun x -> valid_ref __context x.pVS_server_site) - Db.PVS_server.destroy - -let gc_PVS_cache_storage ~__context = - gc_connector ~__context - Db.PVS_cache_storage.get_all - Db.PVS_cache_storage.get_record - (fun x -> valid_ref __context x.pVS_cache_storage_site) - (fun x -> valid_ref __context x.pVS_cache_storage_host) - Db.PVS_cache_storage.destroy +(* If the SR record is missing, delete the VDI record *) +let gc_VDIs ~__context = + let all_srs = Db.SR.get_all ~__context in + List.iter (fun vdi -> + let sr = Db.VDI.get_SR ~__context ~self:vdi in + if not(List.mem sr all_srs) then begin + debug "GCed VDI %s" (Ref.string_of vdi); + Db.VDI.destroy ~__context ~self:vdi + end) (Db.VDI.get_all ~__context) let gc_PIFs ~__context = gc_connector ~__context Db.PIF.get_all Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) @@ -106,7 +75,8 @@ let gc_PIFs ~__context = (try Db.VLAN.destroy ~__context ~self:vlan_to_gc with _ -> ()); List.iter (fun tunnel -> (try Db.Tunnel.destroy ~__context ~self:tunnel with _ -> ())) tunnels_to_gc; List.iter (fun bond -> (try Db.Bond.destroy ~__context ~self:bond with _ -> ())) bonds_to_gc; - Db.PIF.destroy ~__context ~self) + Db.PIF.destroy ~__context ~self) + let gc_VBDs ~__context = gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) (fun ~__context ~self -> @@ -123,16 +93,21 @@ let gc_VBDs ~__context = (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); Db.VBD.destroy ~__context ~self; end) - + let gc_crashdumps ~__context = gc_connector ~__context Db.Crashdump.get_all Db.Crashdump.get_record (fun x->valid_ref __context x.crashdump_VM) (fun x->valid_ref __context x.crashdump_VDI) Db.Crashdump.destroy + let gc_VIFs ~__context = gc_connector ~__context 
Db.VIF.get_all Db.VIF.get_record (fun x->valid_ref __context x.vIF_VM) (fun x->valid_ref __context x.vIF_network) (fun ~__context ~self -> let metrics = Db.VIF.get_metrics ~__context ~self in (try Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); Db.VIF.destroy ~__context ~self) + +let gc_PBDs ~__context = + gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy + let gc_VGPUs ~__context = gc_connector ~__context Db.VGPU.get_all Db.VGPU.get_record (fun x->valid_ref __context x.vGPU_VM) (fun x->valid_ref __context x.vGPU_GPU_group) (fun ~__context ~self -> @@ -165,16 +140,30 @@ let gc_PGPUs ~__context = Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) affected_groups -let gc_PBDs ~__context = - gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy +let gc_VGPU_types ~__context = + (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types + * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) + let open Db_filter_types in + let garbage = Db.VGPU_type.get_records_where ~__context + ~expr:(And ((Eq (Field "VGPUs", Literal "()")), + (Eq (Field "supported_on_PGPUs", Literal "()")))) in + match garbage with + | [] -> () + | _ -> + debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" + (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); + List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage + let gc_Host_patches ~__context = gc_connector ~__context Db.Host_patch.get_all Db.Host_patch.get_record (fun x->valid_ref __context x.host_patch_host) (fun x->valid_ref __context x.host_patch_pool_patch) Db.Host_patch.destroy + let gc_host_cpus ~__context = let host_cpus = Db.Host_cpu.get_all ~__context in List.iter (fun hcpu -> if not (valid_ref __context 
(Db.Host_cpu.get_host ~__context ~self:hcpu)) then Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus + let gc_host_metrics ~__context = let all_host_metrics = Db.Host_metrics.get_all ~__context in let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in @@ -184,25 +173,91 @@ let gc_host_metrics ~__context = if not (List.mem hmetric host_metrics) then Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics -(* If the SR record is missing, delete the VDI record *) -let gc_VDIs ~__context = - let all_srs = Db.SR.get_all ~__context in - List.iter (fun vdi -> - let sr = Db.VDI.get_SR ~__context ~self:vdi in - if not(List.mem sr all_srs) then begin - debug "GCed VDI %s" (Ref.string_of vdi); - Db.VDI.destroy ~__context ~self:vdi - end) (Db.VDI.get_all ~__context) +let probation_pending_tasks = Hashtbl.create 53 -let gc_consoles ~__context = - List.iter (fun console -> - if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) - then begin - Db.Console.destroy ~__context ~self:console; - debug "GCed console %s" (Ref.string_of console); - end - ) (Db.Console.get_all ~__context) +let timeout_tasks ~__context = + let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in + let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in + let oldest_pending_time = Unix.time() -. !Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in + + let completed, pending = + List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) + all_tasks in + + (* Any task that was incomplete at the point someone called Task.destroy + will have `destroy in its current_operations. 
If they're now complete, + we can Kill these immediately *) + let completed_destroyable, completed_gcable = + List.partition + (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) + completed in + + List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; + + let completed_old, completed_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_finished < oldest_completed_time) + completed_gcable in + + let pending_old, pending_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_created < oldest_pending_time) + pending in + + let pending_old_run, pending_old_hung = + List.partition + (fun (_, t) -> + try + let pre_progress = + Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in + t.Db_actions.task_progress -. pre_progress > min_float + with Not_found -> true) + pending_old in + + let () = + Hashtbl.clear probation_pending_tasks; + List.iter + (fun (_, t) -> + Hashtbl.add probation_pending_tasks + t.Db_actions.task_uuid t.Db_actions.task_progress) + pending_old in + + let old = pending_old_hung @ completed_old in + let young = pending_old_run @ pending_young @ completed_young in + (* If there are still too many young tasks then we'll try to delete some completed ones *) + let lucky, unlucky = + if List.length young <= Xapi_globs.max_tasks + then young, [] (* keep them all *) + else + (* Compute how many we'd like to delete *) + let overflow = List.length young - Xapi_globs.max_tasks in + (* We only consider deleting completed tasks *) + let completed, pending = List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in + (* Sort the completed tasks so we delete oldest tasks in preference *) + let completed = + List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in + (* From the completes set, choose up to 'overflow' *) + 
let unlucky, lucky = + if List.length completed > overflow + then List.chop overflow completed + else completed, [] in (* not enough to delete, oh well *) + (* Keep all pending and any which were not chosen from the completed set *) + pending @ lucky, unlucky in + (* Cancel the 'old' and 'unlucky' *) + List.iter (fun (x, y) -> + if not (TaskHelper.status_is_completed y.Db_actions.task_status) + then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; + TaskHelper.destroy ~__context x + ) (old @ unlucky); + if List.length lucky > Xapi_globs.max_tasks + then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks + + let timeout_sessions_common ~__context sessions limit session_group = let unused_sessions = List.filter (fun (x, _) -> @@ -287,90 +342,45 @@ let timeout_sessions ~__context = timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; end - -let probation_pending_tasks = Hashtbl.create 53 - -let timeout_tasks ~__context = - let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in - let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in - let oldest_pending_time = Unix.time() -. !Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in - - let completed, pending = - List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) - all_tasks in - - (* Any task that was incomplete at the point someone called Task.destroy - will have `destroy in its current_operations. 
If they're now complete, - we can Kill these immediately *) - let completed_destroyable, completed_gcable = - List.partition - (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) - completed in - - List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; - - let completed_old, completed_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_finished < oldest_completed_time) - completed_gcable in - - let pending_old, pending_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_created < oldest_pending_time) - pending in - - let pending_old_run, pending_old_hung = - List.partition - (fun (_, t) -> - try - let pre_progress = - Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in - t.Db_actions.task_progress -. pre_progress > min_float - with Not_found -> true) - pending_old in - - let () = - Hashtbl.clear probation_pending_tasks; - List.iter - (fun (_, t) -> - Hashtbl.add probation_pending_tasks - t.Db_actions.task_uuid t.Db_actions.task_progress) - pending_old in - - let old = pending_old_hung @ completed_old in - let young = pending_old_run @ pending_young @ completed_young in - - (* If there are still too many young tasks then we'll try to delete some completed ones *) - let lucky, unlucky = - if List.length young <= Xapi_globs.max_tasks - then young, [] (* keep them all *) - else - (* Compute how many we'd like to delete *) - let overflow = List.length young - Xapi_globs.max_tasks in - (* We only consider deleting completed tasks *) - let completed, pending = List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in - (* Sort the completed tasks so we delete oldest tasks in preference *) - let completed = - List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in - (* From the completes set, choose up to 'overflow' *) - 
let unlucky, lucky = - if List.length completed > overflow - then List.chop overflow completed - else completed, [] in (* not enough to delete, oh well *) - (* Keep all pending and any which were not chosen from the completed set *) - pending @ lucky, unlucky in - (* Cancel the 'old' and 'unlucky' *) - List.iter (fun (x, y) -> - if not (TaskHelper.status_is_completed y.Db_actions.task_status) - then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; - TaskHelper.destroy ~__context x - ) (old @ unlucky); - if List.length lucky > Xapi_globs.max_tasks - then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks + +let gc_messages ~__context = + Xapi_message.gc ~__context + +let gc_consoles ~__context = + List.iter (fun console -> + if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) + then begin + Db.Console.destroy ~__context ~self:console; + debug "GCed console %s" (Ref.string_of console); + end + ) (Db.Console.get_all ~__context) + +let gc_PVS_proxies ~__context = + gc_connector ~__context + Db.PVS_proxy.get_all + Db.PVS_proxy.get_record + (fun x -> valid_ref __context x.pVS_proxy_VIF) + (fun x -> valid_ref __context x.pVS_proxy_site) + Db.PVS_proxy.destroy + +(* A PVS server refers to a PVS site. We delete it, if the reference + * becomes invalid. At creation, the server is connected to a site and + * hence we never GC a server right after it was created. 
*) +let gc_PVS_servers ~__context = + gc_connector ~__context + Db.PVS_server.get_all + Db.PVS_server.get_record + (fun x -> true) + (fun x -> valid_ref __context x.pVS_server_site) + Db.PVS_server.destroy + +let gc_PVS_cache_storage ~__context = + gc_connector ~__context + Db.PVS_cache_storage.get_all + Db.PVS_cache_storage.get_record + (fun x -> valid_ref __context x.pVS_cache_storage_site) + (fun x -> valid_ref __context x.pVS_cache_storage_host) + Db.PVS_cache_storage.destroy (* let timeout_alerts ~__context = @@ -383,8 +393,6 @@ let timeout_alerts ~__context = ) all_alerts *) -let gc_messages ~__context = - Xapi_message.gc ~__context (* do VDIs first because this will cause some VBDs to be affected *) let gc_subtask_list = [ From 1657eb8dfbd1e20d7967159af7e60a96d1a9027d Mon Sep 17 00:00:00 2001 From: Deli Zhang Date: Thu, 25 May 2017 14:44:00 +0800 Subject: [PATCH 53/59] CA-247321: Clean up host_patch table in resync_host Signed-off-by: Deli Zhang --- ocaml/xapi/xapi_pool_update.ml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ocaml/xapi/xapi_pool_update.ml b/ocaml/xapi/xapi_pool_update.ml index 72aa7aff7d6..2aa78835c22 100644 --- a/ocaml/xapi/xapi_pool_update.ml +++ b/ocaml/xapi/xapi_pool_update.ml @@ -499,7 +499,10 @@ let resync_host ~__context ~host = && Xapi_pool_patch.pool_patch_of_update ~__context self |> fun self -> Db.Pool_patch.get_host_patches ~__context ~self |> function [] -> false | _ -> true) - |> List.iter (fun self -> destroy ~__context ~self) + |> List.iter (fun self -> destroy ~__context ~self); + + (* Clean up host_patch table *) + Db_gc_util.gc_Host_patches ~__context end let pool_update_download_handler (req: Request.t) s _ = From 695ca0dd3fb33dfcb4df028a9fc9a8bab0c5c026 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 11:35:25 +0100 Subject: [PATCH 54/59] xapi_crashdump.ml: remove dead code There is no crashdump.create API call, so it's fine to remove this function. 
Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_crashdump.ml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/ocaml/xapi/xapi_crashdump.ml b/ocaml/xapi/xapi_crashdump.ml index d8183877109..e8d9de00513 100644 --- a/ocaml/xapi/xapi_crashdump.ml +++ b/ocaml/xapi/xapi_crashdump.ml @@ -11,16 +11,9 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) -exception Not_implemented let nothrow f () = try f() with _ -> () -let create ~__context ~vM ~vDI = - let cdumpref = Ref.make() in - let uuid = Uuid.to_string (Uuid.make_uuid()) in - Db.Crashdump.create ~__context ~ref:cdumpref ~uuid ~vM ~vDI ~other_config:[]; - cdumpref - let destroy ~__context ~self = Stdext.Pervasiveext.finally (nothrow (fun ()-> From a8c5478eedea82cd7381312b711b0123c77e3f57 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Mon, 22 May 2017 11:44:33 +0100 Subject: [PATCH 55/59] Xapi_crashdump: replace nothrow function with helper Use an equivalent one from Helpers instead. Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_crashdump.ml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/ocaml/xapi/xapi_crashdump.ml b/ocaml/xapi/xapi_crashdump.ml index e8d9de00513..0325e04510a 100644 --- a/ocaml/xapi/xapi_crashdump.ml +++ b/ocaml/xapi/xapi_crashdump.ml @@ -12,14 +12,14 @@ * GNU Lesser General Public License for more details. 
*) -let nothrow f () = try f() with _ -> () - let destroy ~__context ~self = Stdext.Pervasiveext.finally - (nothrow (fun ()-> - let vdi = Db.Crashdump.get_VDI ~__context ~self in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Client.VDI.destroy rpc session_id vdi))) + (Helpers.log_exn_continue + "destroying crashdump" + (fun ()-> + let vdi = Db.Crashdump.get_VDI ~__context ~self in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Client.VDI.destroy rpc session_id vdi))) (fun ()-> Db.Crashdump.destroy ~__context ~self) From f55495e5ac7566ecac46510e6def215eee0648ad Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Thu, 25 May 2017 17:30:21 +0100 Subject: [PATCH 56/59] Add Inverness release to the datamodel Signed-off-by: Gabor Igloi --- ocaml/idl/datamodel.ml | 9 +++++++++ ocaml/idl/datamodel_types.ml | 7 +++++++ 2 files changed, 16 insertions(+) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 7fe0ed395f2..21cb87f5223 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -87,6 +87,9 @@ let ely_release_schema_minor_vsn = 108 let falcon_release_schema_major_vsn = 5 let falcon_release_schema_minor_vsn = 120 +let inverness_release_schema_major_vsn = 5 +let inverness_release_schema_minor_vsn = 120 + (* List of tech-preview releases. Fields in these releases are not guaranteed to be retained when * upgrading to a full release. 
*) let tech_preview_releases = [ @@ -229,6 +232,12 @@ let get_product_releases in_product_since = | x::xs -> go_through_release_order xs in go_through_release_order release_order +let inverness_release = + { internal = get_product_releases rel_inverness + ; opensource = get_oss_releases None + ; internal_deprecated_since = None + } + let falcon_release = { internal = get_product_releases rel_falcon ; opensource=get_oss_releases None diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index bd0d840e12d..d382b211e10 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -56,6 +56,7 @@ let rel_indigo = "indigo" let rel_dundee = "dundee" let rel_ely = "ely" let rel_falcon = "falcon" +let rel_inverness = "inverness" type api_release = { code_name: string option; @@ -179,6 +180,12 @@ let release_order_full = [{ version_major = 2; version_minor = 7; branding = "XenServer 7.2"; + }; { + code_name = Some rel_inverness; + (** TODO replace with the actual version numbers when Inverness is released *) + version_major = 2; + version_minor = 7; + branding = "Unreleased"; }; ] From 826b36faaf3029e2f3c1b0b7600368d281834f13 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 26 May 2017 12:00:42 +0100 Subject: [PATCH 57/59] Datamodel: deprecate crashdump XenAPI class Signed-off-by: Gabor Igloi --- ocaml/idl/datamodel.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index 21cb87f5223..b0b43f4f388 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -6633,7 +6633,7 @@ let crashdump_destroy = call (** A crashdump for a particular VM, stored in a particular VDI *) let crashdump = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_crashdump ~descr:"A VM crashdump" + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None 
~internal_deprecated_since:(Some rel_inverness) ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_crashdump ~descr:"A VM crashdump" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP From bef7f64ffc0227c20b69df79640f1442392f33e5 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Fri, 26 May 2017 12:15:38 +0100 Subject: [PATCH 58/59] Datamodel: add comment about how to declare new XS release Signed-off-by: Gabor Igloi --- ocaml/idl/datamodel_types.ml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index d382b211e10..0464ccfc10d 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -65,6 +65,8 @@ type api_release = { branding: string; } +(* When you add a new release, use the version number of the latest release, + and "Unreleased" for the branding, until the actual values are finalised. *) let release_order_full = [{ code_name = Some rel_rio; version_major = 1; From 1d904611ad80bf245fce09b0a5e5058e357e72c5 Mon Sep 17 00:00:00 2001 From: Gabor Igloi Date: Tue, 23 May 2017 21:00:08 +0100 Subject: [PATCH 59/59] Clean up Xapi_vm_lifecycle.check_operation_error Get rid of the unnecessary get_info function. Signed-off-by: Gabor Igloi --- ocaml/xapi/xapi_vm_lifecycle.ml | 39 ++++++++++++++------------------- 1 file changed, 16 insertions(+), 23 deletions(-) diff --git a/ocaml/xapi/xapi_vm_lifecycle.ml b/ocaml/xapi/xapi_vm_lifecycle.ml index 63c724677ae..11ac7dad722 100644 --- a/ocaml/xapi/xapi_vm_lifecycle.ml +++ b/ocaml/xapi/xapi_vm_lifecycle.ml @@ -307,16 +307,21 @@ let is_mobile ~__context vm strict = && not @@ nested_virt ~__context vm metrics) || not strict +let maybe_get_guest_metrics ~__context ~ref = + if Db.is_valid_ref __context ref + then Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:ref) + else None (** Take an internal VM record and a proposed operation. 
Return None iff the operation would be acceptable; otherwise Some (Api_errors., [list of strings]) corresponding to the first error found. Checking stops at the first error. The "strict" param sets whether we require feature-flags for ops that need guest support: ops in the suspend-like and shutdown-like categories. *) -let check_operation_error ~__context ~vmr ~vmgmr ~ref ~clone_suspended_vm_enabled ~vdis_reset_and_caching ~op ~strict = +let check_operation_error ~__context ~ref ~op ~strict = + let vmr = Db.VM.get_record_internal ~__context ~self:ref in + let vmgmr = maybe_get_guest_metrics ~__context ~ref:(vmr.Db_actions.vM_guest_metrics) in let ref_str = Ref.string_of ref in let power_state = vmr.Db_actions.vM_power_state in - let current_ops = vmr.Db_actions.vM_current_operations in let is_template = vmr.Db_actions.vM_is_a_template in let is_snapshot = vmr.Db_actions.vM_is_a_snapshot in @@ -337,6 +342,7 @@ let check_operation_error ~__context ~vmr ~vmgmr ~ref ~clone_suspended_vm_enable (* if other operations are in progress, check that the new operation is allowed concurrently with them. 
*) let current_error = check current_error (fun () -> + let current_ops = vmr.Db_actions.vM_current_operations in if List.length current_ops <> 0 && not (is_allowed_concurrently ~op ~current_ops) then report_concurrent_operations_error ~current_ops ~ref_str else None) in @@ -428,6 +434,12 @@ let check_operation_error ~__context ~vmr ~vmgmr ~ref ~clone_suspended_vm_enable (* Check for an error due to VDI caching/reset behaviour *) let current_error = check current_error (fun () -> + let vdis_reset_and_caching = List.filter_map (fun vbd -> + try + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + let sm_config = Db.VDI.get_sm_config ~__context ~self:vdi in + Some ((assoc_opt "on_boot" sm_config = Some "reset"), (bool_of_assoc "caching" sm_config)) + with _ -> None) vmr.Db_actions.vM_VBDs in if op = `checkpoint || op = `snapshot || op = `suspend || op = `snapshot_with_quiesce then (* If any vdi exists with on_boot=reset, then disallow checkpoint, snapshot, suspend *) if List.exists fst vdis_reset_and_caching @@ -478,26 +490,8 @@ let check_operation_error ~__context ~vmr ~vmgmr ~ref ~clone_suspended_vm_enable current_error -let maybe_get_guest_metrics ~__context ~ref = - if Db.is_valid_ref __context ref - then Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:ref) - else None - -let get_info ~__context ~self = - let all = Db.VM.get_record_internal ~__context ~self in - let gm = maybe_get_guest_metrics ~__context ~ref:(all.Db_actions.vM_guest_metrics) in - let clone_suspended_vm_enabled = Helpers.clone_suspended_vm_enabled ~__context in - let vdis_reset_and_caching = List.filter_map (fun vbd -> - try - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - let sm_config = Db.VDI.get_sm_config ~__context ~self:vdi in - Some ((assoc_opt "on_boot" sm_config = Some "reset"), (bool_of_assoc "caching" sm_config)) - with _ -> None) all.Db_actions.vM_VBDs in - all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching - let get_operation_error ~__context ~self ~op 
~strict = - let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in - check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op strict + check_operation_error ~__context ~ref:self ~op ~strict let assert_operation_valid ~__context ~self ~op ~strict = match get_operation_error ~__context ~self ~op ~strict with @@ -505,9 +499,8 @@ let assert_operation_valid ~__context ~self ~op ~strict = | Some (a,b) -> raise (Api_errors.Server_error (a,b)) let update_allowed_operations ~__context ~self = - let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in let check accu op = - match check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op true with + match check_operation_error ~__context ~ref:self ~op ~strict:true with | None -> op :: accu | _ -> accu in