diff --git a/.travis-opam-coverage.sh b/.travis-opam-coverage.sh deleted file mode 100644 index a0e09a4ebca..00000000000 --- a/.travis-opam-coverage.sh +++ /dev/null @@ -1,35 +0,0 @@ -# SUMMARY: -# Builds & tests xapi with coverage in a Ubuntu 16.04 Docker container with -# OCaml 4.02.3, then uploads the coverage information to coveralls. - -set -uex - -# Currently there is no way of specifying OPAM depexts for multiple versions of -# a given disto, and our current depexts only work with Ubuntu >= 16.04, due to -# a change in packages (libsystemd-dev). Since the build environments of Travis -# are older then Ubuntu 16.04, we have to run the build in a Docker container -# with an appropriate Ubuntu version. -# We need to pass some Travis environment variables to the container to enable -# uploading to coveralls and detection of Travis CI. -docker run --rm --volume=$PWD:/mnt --workdir=/mnt \ - --env "TRAVIS=$TRAVIS" \ - --env "TRAVIS_JOB_ID=$TRAVIS_JOB_ID" \ - ocaml/opam:ubuntu-16.04_ocaml-4.02.3 \ - bash -uex -c ' -sudo apt-get update - -# replace the base remote with xs-opam -opam repository remove default -opam repository add xs-opam https://github.com/xapi-project/xs-opam.git - -# install the dependencies of xapi -opam pin add --no-action xapi . -opam depext --yes xapi -opam install --deps-only xapi - -# build and test xapi with coverage, then submit the coverage information to coveralls -sudo apt-get install --yes wget -wget https://raw.githubusercontent.com/simonjbeaumont/ocaml-travis-coveralls/master/travis-coveralls.sh -COV_CONF="./configure" bash -ex travis-coveralls.sh -' - diff --git a/.travis-xenserver-build-env.sh b/.travis-xenserver-build-env.sh deleted file mode 100644 index b9a9b94c97d..00000000000 --- a/.travis-xenserver-build-env.sh +++ /dev/null @@ -1,19 +0,0 @@ -# SUMMARY: -# Builds and tests xapi using xenserver-build-env, which installs the -# dependencies as RPMs. 
- -set -uex - -wget https://raw.githubusercontent.com/xenserver/xenserver-build-env/master/utils/travis-build-repo.sh - -# only run deploy.sh when the build succeeds -env \ - CONTAINER_NAME=build-env \ - OCAMLRUNPARAM=b \ - REPO_PACKAGE_NAME=xapi \ - REPO_CONFIGURE_CMD=./configure \ - REPO_BUILD_CMD=make \ - REPO_TEST_CMD='make test' \ - REPO_DOC_CMD='make doc-json' \ - bash travis-build-repo.sh && \ - ( ( test $TRAVIS_PULL_REQUEST == "false" && test $TRAVIS_BRANCH == "master" && bash deploy.sh ) || true ) diff --git a/.travis.yml b/.travis.yml index 197c54fcb6a..29c35f819b2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,17 +1,22 @@ language: c -sudo: required services: docker -script: bash ./.travis-$BUILD_METHOD.sh +install: + - wget https://raw.githubusercontent.com/xenserver/xenserver-build-env/master/utils/travis-build-repo.sh +script: bash travis-build-repo.sh +after_success: + - test $TRAVIS_PULL_REQUEST == "false" && test $TRAVIS_BRANCH == "master" && bash deploy.sh +sudo: true env: global: - # for BUILD_METHOD=xenserver-build-env + - CONTAINER_NAME=build-env + - OCAMLRUNPARAM=b + - REPO_PACKAGE_NAME=xapi + - REPO_CONFIGURE_CMD=./configure + - REPO_BUILD_CMD=make + - REPO_TEST_CMD='make test' + - REPO_DOC_CMD='make doc-json' - secure: tokxJl2litqu/T6UUwzkLRZzlbxnbYqVG2QRKKQz3tkIXyZHQWTS2NAyH7mwDgdBq2dDVSxAUxS1jWq/vGraX7MmbVz37Pz8wjykoIfIRtQuEx+REDAvAzWSw+1LTpUf7ZcI+F2SpgJrnH87uN5AAc220UqIx8TvAtGrita+2+o= matrix: - - BUILD_METHOD=xenserver-build-env - - BUILD_METHOD=opam-coverage + - OCAML_VERSION=4.02 notifications: slack: citrix:BHYQZbI8m036ELU21gZil75Y -matrix: - fast_finish: true - allow_failures: - - env: BUILD_METHOD=opam-coverage diff --git a/Makefile b/Makefile index 993f999a5ea..51c94ffb8c1 100644 --- a/Makefile +++ b/Makefile @@ -12,12 +12,12 @@ build: setup.data doc: setup.data build $(SETUP) -doc $(DOCFLAGS) - ./jsapi.native -destdir _build/ocaml/doc -templdir ocaml/doc/templates + ./jsapi.native -destdir _build/ocaml/doc test: setup.data build 
$(SETUP) -test $(TESTFLAGS) -all: setup.ml +all: $(SETUP) -all $(ALLFLAGS) uninstall: setup.data @@ -26,10 +26,10 @@ uninstall: setup.data reinstall: setup.data $(SETUP) -reinstall $(REINSTALLFLAGS) -clean: setup.ml +clean: $(SETUP) -clean $(CLEANFLAGS) -distclean: setup.ml +distclean: $(SETUP) -distclean $(DISTCLEANFLAGS) setup.data: setup.ml @@ -38,7 +38,7 @@ setup.data: setup.ml setup.ml: _oasis oasis setup -setup-update dynamic -configure: setup.ml +configure: $(SETUP) -configure $(CONFIGUREFLAGS) .PHONY: build doc test all install uninstall reinstall clean distclean configure @@ -95,4 +95,5 @@ install: setup.data rbac_static.csv mkdir -p $(DESTDIR)$(DOCDIR)/html/xenserver cp -r -L _build/ocaml/doc/api $(DESTDIR)$(DOCDIR)/html/xenserver cd ocaml/doc && cp *.js *.html *.css *.png $(DESTDIR)$(DOCDIR)/html/xenserver - cp _build/ocaml/doc/branding.js $(DESTDIR)$(DOCDIR)/html/xenserver/branding.js + cp ocaml/doc/xenserver/* $(DESTDIR)$(DOCDIR)/html/xenserver + diff --git a/README.markdown b/README.markdown index f485011e326..6f544fe0e94 100644 --- a/README.markdown +++ b/README.markdown @@ -1,10 +1,6 @@ Xapi Project's XenAPI Management Toolstack ========================================== -[![Build Status](https://travis-ci.org/xapi-project/xen-api.svg?branch=master)](https://travis-ci.org/xapi-project/xen-api) -[![Coverage Status](https://coveralls.io/repos/github/xapi-project/xen-api/badge.svg?branch=master)](https://coveralls.io/github/xapi-project/xen-api?branch=master) -[![Lines of Code](https://tokei.rs/b1/github/xapi-project/xen-api)](https://github.com/xapi-project/xen-api) - Xen API (or xapi) is a management stack that configures and controls Xen-enabled hosts and resource pools, and co-ordinates resources within the pool. 
Xapi exposes the Xen API interface for many diff --git a/_oasis b/_oasis index 2dc4ab15f8c..71ec9716c8c 100644 --- a/_oasis +++ b/_oasis @@ -424,8 +424,7 @@ Executable jsapi xapi-consts, stdext, uuid, - gzip, - mustache + gzip ############################################################################ diff --git a/_tags b/_tags deleted file mode 100644 index 5587ac5cca5..00000000000 --- a/_tags +++ /dev/null @@ -1,3 +0,0 @@ -# OASIS_START -# OASIS_STOP -: warn(-52) diff --git a/ocaml/database/db_cache_impl.ml b/ocaml/database/db_cache_impl.ml index 702e5111db4..39e2e048f24 100644 --- a/ocaml/database/db_cache_impl.ml +++ b/ocaml/database/db_cache_impl.ml @@ -254,24 +254,14 @@ let process_structured_field_locked t (key,value) tblname fld objref proc_fn_sel let newval = match proc_fn_selector with | AddSet -> add_to_set key existing_str | RemoveSet -> remove_from_set key existing_str - | AddMap | AddMapLegacy -> + | AddMap -> begin try - (* We use the idempotent map add if we're using the non-legacy - process function, or if the global field 'idempotent_map' has - been set. By default, the Db calls on the master use the - legacy functions, but those on the slave use the new one. 
- This means xapi code should always assume idempotent_map is - true *) - let idempotent = - (proc_fn_selector = AddMap) || !Db_globs.idempotent_map - in - add_to_map ~idempotent key value existing_str + add_to_map key value existing_str with Duplicate -> error "Duplicate key in set or map: table %s; field %s; ref %s; key %s" tblname fld objref key; raise (Duplicate_key (tblname,fld,objref,key)); end - | RemoveMap -> remove_from_map key existing_str in write_field_locked t tblname objref fld newval with Not_found -> diff --git a/ocaml/database/db_cache_types.ml b/ocaml/database/db_cache_types.ml index ab3b37fe427..e42402336ca 100644 --- a/ocaml/database/db_cache_types.ml +++ b/ocaml/database/db_cache_types.ml @@ -350,10 +350,10 @@ let remove_from_set key t = Schema.Value.Set (List.filter (fun x -> x <> key) t) exception Duplicate -let add_to_map ~idempotent key value t = +let add_to_map key value t = let t = Schema.Value.Unsafe_cast.pairs t in - if List.mem_assoc key t && (not idempotent || List.assoc key t <> value) then raise Duplicate; - Schema.Value.Pairs ((key, value) :: List.filter (fun (k, _) -> k <> key) t) + if List.mem key (List.map fst t) then raise Duplicate; + Schema.Value.Pairs ((key, value) :: t) let remove_from_map key t = let t = Schema.Value.Unsafe_cast.pairs t in @@ -500,5 +500,4 @@ type structured_op_t = | RemoveSet | AddMap | RemoveMap - | AddMapLegacy [@@deriving rpc] diff --git a/ocaml/database/db_cache_types.mli b/ocaml/database/db_cache_types.mli index a96849ab010..028fa177893 100644 --- a/ocaml/database/db_cache_types.mli +++ b/ocaml/database/db_cache_types.mli @@ -146,7 +146,7 @@ end exception Duplicate val add_to_set : string -> Schema.Value.t -> Schema.Value.t val remove_from_set : string -> Schema.Value.t -> Schema.Value.t -val add_to_map : idempotent:bool -> string -> string -> Schema.Value.t -> Schema.Value.t +val add_to_map : string -> string -> Schema.Value.t -> Schema.Value.t val remove_from_map : string -> Schema.Value.t -> 
Schema.Value.t val set_field : string -> string -> string -> Schema.Value.t -> Database.t -> Database.t @@ -169,6 +169,5 @@ type structured_op_t = | RemoveSet | AddMap | RemoveMap - | AddMapLegacy val structured_op_t_of_rpc: Rpc.t -> structured_op_t val rpc_of_structured_op_t: structured_op_t -> Rpc.t diff --git a/ocaml/database/db_globs.ml b/ocaml/database/db_globs.ml index e99fc110ebf..50e15c3743d 100644 --- a/ocaml/database/db_globs.ml +++ b/ocaml/database/db_globs.ml @@ -50,9 +50,6 @@ let static_vdis_dir = ref "/etc/xensource/static-vdis" (* Note the following has an equivalent in the xapi layer *) let http_limit_max_rpc_size = 300 * 1024 (* 300K *) -(* add_to_map is idempotent *) -let idempotent_map = ref false - (** Dynamic configurations to be read whenever xapi (re)start *) let permanent_master_failure_retry_interval = ref 60. diff --git a/ocaml/database/db_rpc_common_v1.ml b/ocaml/database/db_rpc_common_v1.ml index 2d21b7494fb..ce649775009 100644 --- a/ocaml/database/db_rpc_common_v1.ml +++ b/ocaml/database/db_rpc_common_v1.ml @@ -79,9 +79,7 @@ let marshall_structured_op x = AddSet -> "addset" | RemoveSet -> "removeset" | AddMap -> "addmap" - | RemoveMap -> "removemap" - | AddMapLegacy -> "addmap" (* Nb, we always use 'non-legacy' mode for remote access *) - in + | RemoveMap -> "removemap" in XMLRPC.To.string str let unmarshall_structured_op xml = match (XMLRPC.From.string xml) with @@ -313,3 +311,4 @@ let unmarshall_read_records_where_response xml = [ref_xml; rec_xml] -> (XMLRPC.From.string ref_xml, unmarshall_read_record_response rec_xml) | _ -> raise DB_remote_marshall_error) xml_refs_and_recs_list + diff --git a/ocaml/database/db_rpc_common_v2.ml b/ocaml/database/db_rpc_common_v2.ml index aab47604b2c..cfd6b3c38be 100644 --- a/ocaml/database/db_rpc_common_v2.ml +++ b/ocaml/database/db_rpc_common_v2.ml @@ -34,16 +34,6 @@ module Request = struct | Read_records_where of string * Db_filter_types.expr | Process_structured_field of (string * string) * string 
* string * string * Db_cache_types.structured_op_t [@@deriving rpc] - - (* Make sure the slave only ever uses the idempotent version *) - let rpc_of_t t = - let t' = - match t with - | Process_structured_field (a,b,c,d,Db_cache_types.AddMapLegacy) -> - Process_structured_field (a,b,c,d,Db_cache_types.AddMap) - | x -> x - in - rpc_of_t t' end module Response = struct diff --git a/ocaml/doc/branding.js b/ocaml/doc/branding.js new file mode 100644 index 00000000000..81da0b1d37d --- /dev/null +++ b/ocaml/doc/branding.js @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2006-2009 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + */ + +function make_title() { + document.write('Xapi Documentation'); +} + +function make_header(t) { + if (t == 'apidoc') + title = 'Xapi – XenAPI Documentation'; + else if (t == 'codedoc') + title = 'Xapi – OCaml Code Documentation'; + else + title = 'Xapi – Documentation'; + + html = '

XenServer Management API

' + document.getElementById('header').innerHTML = html; +} + +first_release = 'midnight-ride'; + +function get_release_name(s) +{ + switch (s) { + case 'rio': + case 'miami': + case 'symc': + case 'orlando': + case 'orlando-update-1': + case 'george': + case 'midnight-ride': + return 'XCP 0.5'; + case 'cowley': + return 'XCP 1.0'; + case 'boston': + return 'XCP 1.5'; + case 'tampa': + return 'XCP 1.6'; + case 'clearwater': + return 'XenServer 6.2'; + case 'vgpu-tech-preview': + return 'XenServer 6.2 vGPU preview'; + case 'vgpu-productisation': + return 'XenServer 6.2 SP1'; + case 'clearwater-felton': + return 'XenServer 6.2 SP1 Hotfix 4'; + case 'clearwater-whetstone': + return 'XenServer 6.2 SP1 Hotfix 11'; + case 'creedence': + return 'XenServer 6.5'; + case 'cream': + return 'XenServer 6.5 SP1'; + case 'dundee': + return 'XenServer 7.0'; + case 'ely': + return 'XenServer 7.1'; + default: + return (s + ' (unreleased)'); + } +} + diff --git a/ocaml/doc/jsapi.ml b/ocaml/doc/jsapi.ml index 0994aaed52d..b0dae63c058 100644 --- a/ocaml/doc/jsapi.ml +++ b/ocaml/doc/jsapi.ml @@ -12,8 +12,6 @@ * GNU Lesser General Public License for more details. *) -open Stdext -open Pervasiveext open Datamodel_types type change_t = lifecycle_change * string * string @@ -21,12 +19,10 @@ and changes_t = change_t list [@@deriving rpc] let destdir = ref "." -let templdir = ref "" let parse_args () = Arg.parse [ "-destdir", Arg.Set_string destdir, "the destination directory for the generated files"; - "-templdir", Arg.Set_string templdir, "the directory with the template (mustache) files"; ] (fun x-> Printf.printf "Ignoring anonymous argument %s" x) ("Generates documentation for the datamodel classes. 
See -help.") @@ -52,14 +48,14 @@ let generate_files destdir = let changes_in_release rel = let search_obj obj = - let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) obj.obj_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in let obj_changes : changes_t = List.map (fun (transition, release, doc) -> (transition, obj.name, if doc = "" && transition = Published then obj.description else doc) ) changes in let changes_for_msg m = - let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) m.msg_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in List.map (fun (transition, release, doc) -> (transition, m.msg_name, if doc = "" && transition = Published then m.msg_doc else doc) ) changes @@ -68,7 +64,7 @@ let generate_files destdir = let msg_changes : changes_t = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in let changes_for_field f = - let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) f.lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in let field_name = String.concat "_" f.full_name in List.map (fun (transition, release, doc) -> (transition, field_name, if doc = "" && transition = Published then f.field_description else doc) @@ -87,37 +83,14 @@ let generate_files destdir = "{'cls': '" ^ obj.name ^ "', 'obj_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t obj_changes) ^ ", 'field_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t field_changes) ^ ", 'msg_changes': " ^ Jsonrpc.to_string (rpc_of_changes_t msg_changes) ^ "}" in let release_info = String.concat ", " (List.map search_obj objs) in - let fname = (code_name_of_release rel) ^ ".json" in + let fname = rel ^ ".json" in Stdext.Unixext.write_string_to_file (Filename.concat api_dir fname) ("release_info = 
[" ^ release_info ^ "]") in List.iter changes_in_release release_order; - let release_list = String.concat ", " (List.map (fun s -> "'" ^ (code_name_of_release s) ^ "'") release_order) in + let release_list = String.concat ", " (List.map (fun s -> "'" ^ s ^ "'") release_order) in Stdext.Unixext.write_string_to_file (Filename.concat api_dir "releases.json") ("releases = [" ^ release_list ^ "]") -let json_releases = - let json_of_rel x = `O [ - "code_name", `String (code_name_of_release x); - "version_major", `Float (float_of_int x.version_major); - "version_minor", `Float (float_of_int x.version_minor); - "branding", `String x.branding; - ] - in - `O [ "releases", `A (List.map json_of_rel release_order) ] - -let render_template template_file json output_file = - let templ = Stdext.Unixext.string_of_file template_file |> Mustache.of_string in - let rendered = Mustache.render templ json in - let out_chan = open_out output_file in - finally (fun () -> output_string out_chan rendered) - (fun () -> close_out out_chan) - -let populate_releases templates_dir dest_dir= - let inpath x = Filename.concat templates_dir x in - let outpath x = Filename.concat dest_dir x in - let render (infile, outfile) = render_template (inpath infile) json_releases (outpath outfile) in - [ "branding.mustache", "branding.js"] |> List.iter render let _ = parse_args (); - generate_files !destdir; - populate_releases !templdir !destdir + generate_files !destdir \ No newline at end of file diff --git a/ocaml/doc/templates/branding.mustache b/ocaml/doc/xenserver/branding.js similarity index 51% rename from ocaml/doc/templates/branding.mustache rename to ocaml/doc/xenserver/branding.js index 3924445efe5..2095f961822 100644 --- a/ocaml/doc/templates/branding.mustache +++ b/ocaml/doc/xenserver/branding.js @@ -24,14 +24,49 @@ function make_header(t) { first_release = 'rio'; -function get_release_name(s) { +function get_release_name(s) +{ switch (s) { -{{#releases}} - case '{{code_name}}': - return 
'{{branding}}'; -{{/releases}} - default: - return 'Unreleased'; - } + case 'rio': + return 'XenServer 4.0'; + case 'miami': + return 'XenServer 4.1'; + case 'symc': + return 'XenServer 4.1.1'; + case 'orlando': + return 'XenServer 5.0'; + case 'orlando-update-1': + return 'XenServer 5.0 Update 1'; + case 'george': + return 'XenServer 5.5'; + case 'midnight-ride': + return 'XenServer 5.6'; + case 'cowley': + return 'XenServer 5.6 FP1'; + case 'boston': + return 'XenServer 6.0'; + case 'tampa': + return 'XenServer 6.1'; + case 'clearwater': + return 'XenServer 6.2'; + case 'vgpu-tech-preview': + return 'XenServer 6.2 vGPU preview'; + case 'vgpu-productisation': + return 'XenServer 6.2 SP1'; + case 'clearwater-felton': + return 'XenServer 6.2 SP1 Hotfix 4'; + case 'clearwater-whetstone': + return 'XenServer 6.2 SP1 Hotfix 11'; + case 'creedence': + return 'XenServer 6.5'; + case 'cream': + return 'XenServer 6.5 SP1'; + case 'dundee': + return 'XenServer 7.0'; + case 'ely': + return 'XenServer 7.1'; + default: + return 'Unreleased'; + } } diff --git a/ocaml/idl/datamodel.ml b/ocaml/idl/datamodel.ml index b0b43f4f388..5ffa328b94a 100644 --- a/ocaml/idl/datamodel.ml +++ b/ocaml/idl/datamodel.ml @@ -87,8 +87,9 @@ let ely_release_schema_minor_vsn = 108 let falcon_release_schema_major_vsn = 5 let falcon_release_schema_minor_vsn = 120 -let inverness_release_schema_major_vsn = 5 -let inverness_release_schema_minor_vsn = 120 +(* the schema vsn of the last release: used to determine whether we can upgrade or not.. *) +let last_release_schema_major_vsn = falcon_release_schema_major_vsn +let last_release_schema_minor_vsn = falcon_release_schema_minor_vsn (* List of tech-preview releases. Fields in these releases are not guaranteed to be retained when * upgrading to a full release. 
*) @@ -218,6 +219,8 @@ let _R_ALL = _R_READ_ONLY let errors = Hashtbl.create 10 let messages = Hashtbl.create 10 +exception UnspecifiedRelease + let get_oss_releases in_oss_since = match in_oss_since with None -> [] @@ -228,24 +231,17 @@ let get_product_releases in_product_since = let rec go_through_release_order rs = match rs with [] -> raise UnspecifiedRelease - | x::xs when code_name_of_release x = in_product_since -> "closed"::in_product_since::(List.map code_name_of_release xs) - | x::xs -> go_through_release_order xs + | x::xs -> if x=in_product_since then "closed"::x::xs else go_through_release_order xs in go_through_release_order release_order -let inverness_release = - { internal = get_product_releases rel_inverness - ; opensource = get_oss_releases None - ; internal_deprecated_since = None - } - let falcon_release = { internal = get_product_releases rel_falcon ; opensource=get_oss_releases None ; internal_deprecated_since=None } -let ely_release = - { internal = get_product_releases rel_ely +let dundee_plus_release = + { internal = get_product_releases rel_dundee_plus ; opensource=get_oss_releases None ; internal_deprecated_since=None } @@ -311,37 +307,37 @@ let cowley_release = } let midnight_ride_release = - { internal=get_product_releases rel_midnight_ride + { internal=get_product_releases "midnight-ride" ; opensource=get_oss_releases None ; internal_deprecated_since=None } let george_release = - { internal=get_product_releases rel_george + { internal=get_product_releases "george" ; opensource=get_oss_releases None ; internal_deprecated_since=None } let orlando_release = - { internal=get_product_releases rel_orlando + { internal=get_product_releases "orlando" ; opensource=get_oss_releases None ; internal_deprecated_since=None } let miami_symc_release = - { internal=get_product_releases rel_symc + { internal=get_product_releases "symc" ; opensource=get_oss_releases None ; internal_deprecated_since=None } let miami_release = - { 
internal=get_product_releases rel_miami + { internal=get_product_releases "miami" ; opensource=get_oss_releases None ; internal_deprecated_since=None } let rio_release = - { internal=get_product_releases rel_rio + { internal=get_product_releases "rio" ; opensource=get_oss_releases (Some "3.0.3") ; internal_deprecated_since=None } @@ -509,9 +505,6 @@ let _ = error Api_errors.cannot_contact_host ["host"] ~doc:"Cannot forward messages because the host cannot be contacted. The host may be switched off or there may be network connectivity problems." (); - error Api_errors.tls_connection_failed ["address"; "port"] - ~doc:"Cannot contact the other host using TLS on the specified address and port" (); - error Api_errors.uuid_invalid [ "type"; "uuid" ] ~doc:"The uuid you supplied was invalid." (); error Api_errors.object_nolonger_exists [] @@ -854,10 +847,6 @@ let _ = ~doc:"The host failed to enable external authentication." (); error Api_errors.auth_enable_failed_unavailable ["message"] ~doc:"The host failed to enable external authentication." (); - error Api_errors.auth_enable_failed_invalid_ou ["message"] - ~doc:"The host failed to enable external authentication." (); - error Api_errors.auth_enable_failed_invalid_account ["message"] - ~doc:"The host failed to enable external authentication." (); error Api_errors.auth_disable_failed ["message"] ~doc:"The host failed to disable external authentication." (); error Api_errors.auth_disable_failed_wrong_credentials ["message"] @@ -911,10 +900,6 @@ let _ = ~doc:"The pool failed to enable external authentication." (); error Api_errors.pool_auth_enable_failed_duplicate_hostname ["host";"message"] ~doc:"The pool failed to enable external authentication." (); - error Api_errors.pool_auth_enable_failed_invalid_ou ["host";"message"] - ~doc:"The pool failed to enable external authentication." (); - error Api_errors.pool_auth_enable_failed_invalid_account ["host";"message"] - ~doc:"The pool failed to enable external authentication." 
(); error Api_errors.pool_auth_disable_failed ["host";"message"] ~doc:"The pool failed to disable the external authentication of at least one host." (); error Api_errors.pool_auth_disable_failed_wrong_credentials ["host";"message"] @@ -3227,7 +3212,7 @@ let host_call_plugin = call let host_has_extension = call ~name:"has_extension" - ~in_product_since:rel_ely + ~in_product_since:rel_dundee_plus ~doc:"Return true if the extension is available on the host" ~params:[Ref _host, "host", "The host"; String, "name", "The name of the API call";] @@ -3237,7 +3222,7 @@ let host_has_extension = call let host_call_extension = call ~name:"call_extension" - ~in_product_since:rel_ely + ~in_product_since:rel_dundee_plus ~custom_marshaller:true ~doc:"Call a XenAPI extension on this host" ~params:[Ref _host, "host", "The host"; @@ -4563,7 +4548,7 @@ let host_emergency_ha_disable = call ~flags:[`Session] ~in_oss_since:None ~in_product_since:rel_orlando ~versioned_params: - [{param_type=Bool; param_name="soft"; param_doc="Disable HA temporarily, revert upon host reboot or further changes, idempotent"; param_release=ely_release; param_default=Some(VBool false)}; + [{param_type=Bool; param_name="soft"; param_doc="Disable HA temporarily, revert upon host reboot or further changes, idempotent"; param_release=dundee_plus_release; param_default=Some(VBool false)}; ] ~doc:"This call disables HA on the local host. This should only be used with extreme care." 
~allowed_roles:_R_POOL_OP @@ -5072,7 +5057,7 @@ let host = field ~qualifier:RW ~in_product_since:rel_tampa ~default_value:(Some (VMap [])) ~ty:(Map (String, String)) "guest_VCPUs_params" "VCPUs params to apply to all resident guests"; field ~qualifier:RW ~in_product_since:rel_cream ~default_value:(Some (VEnum "enabled")) ~ty:host_display "display" "indicates whether the host is configured to output its console to a physical display device"; field ~qualifier:DynamicRO ~in_product_since:rel_cream ~default_value:(Some (VSet [VInt 0L])) ~ty:(Set (Int)) "virtual_hardware_platform_versions" "The set of versions of the virtual hardware platform that the host can offer to its guests"; - field ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) ~in_product_since:rel_ely ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)"; + field ~qualifier:DynamicRO ~default_value:(Some (VRef null_ref)) ~in_product_since:rel_dundee_plus ~ty:(Ref _vm) "control_domain" "The control domain (domain 0)"; field ~qualifier:DynamicRO ~lifecycle:[Published, rel_ely, ""] ~ty:(Set (Ref _pool_update)) ~ignore_foreign_key:true "updates_requiring_reboot" "List of updates which require reboot"; field ~qualifier:DynamicRO ~lifecycle:[Published, rel_falcon, ""] ~ty:(Set (Ref _feature)) "features" "List of features available on this host" ]) @@ -6633,7 +6618,7 @@ let crashdump_destroy = call (** A crashdump for a particular VM, stored in a particular VDI *) let crashdump = - create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:(Some rel_inverness) ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_crashdump ~descr:"A VM crashdump" + create_obj ~in_db:true ~in_product_since:rel_rio ~in_oss_since:None ~internal_deprecated_since:None ~persist:PersistEverything ~gen_constructor_destructor:false ~name:_crashdump ~descr:"A VM crashdump" ~gen_events:true ~doccomments:[] ~messages_default_allowed_roles:_R_POOL_OP @@ -9264,7 +9249,7 @@ let 
vgpu_type = () module PVS_site = struct - let lifecycle = [Prototyped, rel_ely, ""] + let lifecycle = [Prototyped, rel_dundee_plus, ""] let introduce = call ~name:"introduce" @@ -9350,7 +9335,7 @@ end let pvs_site = PVS_site.obj module PVS_server = struct - let lifecycle = [Prototyped, rel_ely, ""] + let lifecycle = [Prototyped, rel_dundee_plus, ""] let introduce = call ~name:"introduce" @@ -9419,7 +9404,7 @@ end let pvs_server = PVS_server.obj module PVS_proxy = struct - let lifecycle = [Prototyped, rel_ely, ""] + let lifecycle = [Prototyped, rel_dundee_plus, ""] let status = Enum ("pvs_proxy_status", [ "stopped", "The proxy is not currently running"; diff --git a/ocaml/idl/datamodel_types.ml b/ocaml/idl/datamodel_types.ml index 0464ccfc10d..41a88212dd3 100644 --- a/ocaml/idl/datamodel_types.ml +++ b/ocaml/idl/datamodel_types.ml @@ -54,161 +54,42 @@ let rel_creedence = "creedence" let rel_cream = "cream" let rel_indigo = "indigo" let rel_dundee = "dundee" +let rel_dundee_plus = "dundee-plus" let rel_ely = "ely" let rel_falcon = "falcon" -let rel_inverness = "inverness" - -type api_release = { - code_name: string option; - version_major: int; - version_minor: int; - branding: string; -} - -(* When you add a new release, use the version number of the latest release, - and "Unreleased" for the branding, until the actual values are finalised. 
*) -let release_order_full = [{ - code_name = Some rel_rio; - version_major = 1; - version_minor = 1; - branding = "XenServer 4.0"; - }; { - code_name = Some rel_miami; - version_major = 1; - version_minor = 2; - branding = "XenServer 4.1"; - }; { - code_name = Some rel_symc; - version_major = 1; - version_minor = 2; - branding = "XenServer 4.1.1"; - }; { - code_name = Some rel_orlando; - version_major = 1; - version_minor = 3; - branding = "XenServer 5.0"; - }; { - code_name = Some rel_orlando_update_1; - version_major = 1; - version_minor = 3; - branding = "XenServer 5.0 Update 1"; - }; { - code_name = None; - version_major = 1; - version_minor = 4; - branding = "Unreleased"; - }; { - code_name = None; - version_major = 1; - version_minor = 5; - branding = "XenServer 5.0 update 3"; - }; { - code_name = Some rel_george; - version_major = 1; - version_minor = 6; - branding = "XenServer 5.5"; - }; { - code_name = Some rel_midnight_ride; - version_major = 1; - version_minor = 7; - branding = "XenServer 5.6"; - }; { - code_name = Some rel_cowley; - version_major = 1; - version_minor = 8; - branding = "XenServer 5.6 FP1"; - }; { - code_name = Some rel_boston; - version_major = 1; - version_minor = 9; - branding = "XenServer 6.0"; - }; { - code_name = Some rel_tampa; - version_major = 1; - version_minor = 10; - branding = "XenServer 6.1"; - }; { - code_name = Some rel_clearwater; - version_major = 2; - version_minor = 0; - branding = "XenServer 6.2"; - }; { - code_name = Some rel_vgpu_tech_preview; - version_major = 2; - version_minor = 0; - branding = "XenServer 6.2 SP1 Tech-Preview"; - }; { - code_name = Some rel_vgpu_productisation; - version_major = 2; - version_minor = 1; - branding = "XenServer 6.2 SP1"; - }; { - code_name = Some rel_clearwater_felton; - version_major = 2; - version_minor = 2; - branding = "XenServer 6.2 SP1 Hotfix 4"; - }; { - code_name = Some rel_clearwater_whetstone; - version_major = 2; - version_minor = 2; - branding = "XenServer 6.2 SP1 
Hotfix 11"; - }; { - code_name = Some rel_creedence; - version_major = 2; - version_minor = 3; - branding = "XenServer 6.5"; - }; { - code_name = Some rel_cream; - version_major = 2; - version_minor = 4; - branding = "XenServer 6.5 SP1"; - }; { - code_name = Some rel_indigo; - version_major = 2; - version_minor = 4; - branding = "XenServer 6.5 SP1 Hotfix 31"; - }; { - code_name = Some rel_dundee; - version_major = 2; - version_minor = 5; - branding = "XenServer 7.0"; - }; { - code_name = Some rel_ely; - version_major = 2; - version_minor = 6; - branding = "XenServer 7.1"; - }; { - code_name = Some rel_falcon; - version_major = 2; - version_minor = 7; - branding = "XenServer 7.2"; - }; { - code_name = Some rel_inverness; - (** TODO replace with the actual version numbers when Inverness is released *) - version_major = 2; - version_minor = 7; - branding = "Unreleased"; - }; - ] let release_order = - List.filter (fun x -> x.code_name <> None) release_order_full + [ rel_rio + ; rel_miami + ; rel_symc + ; rel_orlando + ; rel_orlando_update_1 + ; rel_george + ; rel_midnight_ride + ; rel_cowley + ; rel_boston + ; rel_tampa + ; rel_clearwater + ; rel_vgpu_tech_preview + ; rel_vgpu_productisation + ; rel_clearwater_felton + ; rel_clearwater_whetstone + ; rel_creedence + ; rel_cream + ; rel_indigo + ; rel_dundee + ; rel_dundee_plus + ; rel_ely + ; rel_falcon + ] exception Unknown_release of string -exception UnspecifiedRelease - -let code_name_of_release x = - match x.code_name with - | Some r -> r - | None -> raise UnspecifiedRelease - (* ordering function on releases *) let release_leq x y = let rec posn_in_list i x l = match l with [] -> raise (Unknown_release x) - | r::rs when code_name_of_release r = x -> i - | r::rs-> posn_in_list (i+1) x rs in + | r::rs -> if r=x then i else posn_in_list (i+1) x rs in (posn_in_list 0 x release_order) <= (posn_in_list 0 y release_order) (** Types of object fields. 
Accessor functions are generated for each field automatically according to its type and qualifiers. *) diff --git a/ocaml/idl/dm_api.ml b/ocaml/idl/dm_api.ml index 54b4e0a5fe7..beadc1bd8b5 100644 --- a/ocaml/idl/dm_api.ml +++ b/ocaml/idl/dm_api.ml @@ -233,7 +233,7 @@ let check api emergency_calls = [] -> sofar | "closed"::xs -> find_smallest sofar xs (* closed is not a real release, so skip it *) | x::xs -> if release_lt x sofar then find_smallest x xs else find_smallest sofar xs in - find_smallest (getlast release_order |> code_name_of_release) releases in + find_smallest (getlast release_order) releases in let rec check_vsns max_release_sofar ps = match ps with [] -> true diff --git a/ocaml/idl/json_backend/gen_json.ml b/ocaml/idl/json_backend/gen_json.ml index b1011268e9e..ce70cc4e1fa 100644 --- a/ocaml/idl/json_backend/gen_json.ml +++ b/ocaml/idl/json_backend/gen_json.ml @@ -257,7 +257,7 @@ let compare_changes (a_t, a_n, _, a_k) (b_t, b_n, _, b_k) = let releases objs = let changes_in_release rel = let search_obj obj = - let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) obj.obj_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = rel) obj.obj_lifecycle in let obj_changes = List.map (fun (transition, release, doc) -> transition, @@ -267,7 +267,7 @@ let releases objs = ) changes in let changes_for_msg m = - let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) m.msg_lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = rel) m.msg_lifecycle in List.map (fun (transition, release, doc) -> transition, obj.name ^ "." 
^ m.msg_name, @@ -280,7 +280,7 @@ let releases objs = let msg_changes = List.fold_left (fun l m -> l @ (changes_for_msg m)) [] msgs in let changes_for_field f = - let changes = List.filter (fun (transition, release, doc) -> release = code_name_of_release rel) f.lifecycle in + let changes = List.filter (fun (transition, release, doc) -> release = rel) f.lifecycle in let field_name = String.concat "_" f.full_name in List.map (fun (transition, release, doc) -> transition, @@ -302,7 +302,7 @@ let releases objs = in JArray (List.map search_obj objs |> List.flatten |> List.sort compare_changes |> List.map jobject_of_change) in - let release_info = JObject (List.map (fun rel -> code_name_of_release rel, changes_in_release rel) release_order) in + let release_info = JObject (List.map (fun rel -> rel, changes_in_release rel) release_order) in Stdext.Unixext.write_string_to_file ("release_info.json") (string_of_json 0 release_info) let _ = diff --git a/ocaml/idl/ocaml_backend/gen_db_actions.ml b/ocaml/idl/ocaml_backend/gen_db_actions.ml index f9242f244c6..8fcfec87a3e 100644 --- a/ocaml/idl/ocaml_backend/gen_db_actions.ml +++ b/ocaml/idl/ocaml_backend/gen_db_actions.ml @@ -312,7 +312,7 @@ let db_action api : O.Module.t = (Escaping.escape_id full_name) Client._self | FromField(Add, { DT.ty = DT.Map(_, _); full_name = full_name }) -> - Printf.sprintf "DB.process_structured_field __t (%s,%s) \"%s\" \"%s\" %s AddMapLegacy" + Printf.sprintf "DB.process_structured_field __t (%s,%s) \"%s\" \"%s\" %s AddMap" Client._key Client._value (Escaping.escape_obj obj.DT.name) (Escaping.escape_id full_name) @@ -448,7 +448,7 @@ let db_action api : O.Module.t = (** Generate a signature for the Server.Make functor. It should have one field per member in the user-facing API (not the special full 'DB api') which has no custom action. 
The signature will be smaller than the - db_actions signature but the db_actions module will be compatible with it *) + db_actions signature but the db_actions module will be compatable with it *) let make_db_defaults_api = Dm_api.filter (fun _ -> true) (fun _ -> true) (fun x -> not(Gen_empty_custom.operation_requires_side_effect x)) @@ -472,3 +472,4 @@ let db_defaults api : O.Signature.t = { O.Signature.name = _db_defaults; elements = List.map (fun x -> O.Signature.Module (obj x)) (Dm_api.objects_of_api api) } + diff --git a/ocaml/xapi-consts/api_errors.ml b/ocaml/xapi-consts/api_errors.ml index 380267634e4..5639c7c86f4 100644 --- a/ocaml/xapi-consts/api_errors.ml +++ b/ocaml/xapi-consts/api_errors.ml @@ -42,7 +42,6 @@ let session_invalid = "SESSION_INVALID" let change_password_rejected = "CHANGE_PASSWORD_REJECTED" let user_is_not_local_superuser = "USER_IS_NOT_LOCAL_SUPERUSER" let cannot_contact_host = "CANNOT_CONTACT_HOST" -let tls_connection_failed = "TLS_CONNECTION_FAILED" let not_supported_during_upgrade = "NOT_SUPPORTED_DURING_UPGRADE" let handle_invalid = "HANDLE_INVALID" @@ -463,14 +462,12 @@ let auth_suffix_permission_denied = "_PERMISSION_DENIED" let auth_suffix_domain_lookup_failed = "_DOMAIN_LOOKUP_FAILED" let auth_suffix_unavailable = "_UNAVAILABLE" let auth_suffix_invalid_ou = "_INVALID_OU" -let auth_suffix_invalid_account = "_INVALID_ACCOUNT" let auth_enable_failed = "AUTH_ENABLE_FAILED" let auth_enable_failed_wrong_credentials = auth_enable_failed^auth_suffix_wrong_credentials let auth_enable_failed_permission_denied = auth_enable_failed^auth_suffix_permission_denied let auth_enable_failed_domain_lookup_failed = auth_enable_failed^auth_suffix_domain_lookup_failed let auth_enable_failed_unavailable = auth_enable_failed^auth_suffix_unavailable let auth_enable_failed_invalid_ou = auth_enable_failed^auth_suffix_invalid_ou -let auth_enable_failed_invalid_account = auth_enable_failed^auth_suffix_invalid_account let auth_disable_failed = 
"AUTH_DISABLE_FAILED" let auth_disable_failed_wrong_credentials = auth_disable_failed^auth_suffix_wrong_credentials let auth_disable_failed_permission_denied = auth_disable_failed^auth_suffix_permission_denied @@ -481,8 +478,7 @@ let pool_auth_enable_failed_wrong_credentials = pool_auth_enable_failed^auth_suf let pool_auth_enable_failed_permission_denied = pool_auth_enable_failed^auth_suffix_permission_denied let pool_auth_enable_failed_domain_lookup_failed = pool_auth_enable_failed^auth_suffix_domain_lookup_failed let pool_auth_enable_failed_unavailable = pool_auth_enable_failed^auth_suffix_unavailable -let pool_auth_enable_failed_invalid_ou = pool_auth_enable_failed^auth_suffix_invalid_ou -let pool_auth_enable_failed_invalid_account = pool_auth_enable_failed^auth_suffix_invalid_account +let pool_auth_enable_failed_unavailable = pool_auth_enable_failed^auth_suffix_invalid_ou let pool_auth_enable_failed_duplicate_hostname = pool_auth_enable_failed^"_DUPLICATE_HOSTNAME" let pool_auth_disable_failed = pool_auth_prefix^auth_disable_failed let pool_auth_disable_failed_wrong_credentials = pool_auth_disable_failed^auth_suffix_wrong_credentials diff --git a/ocaml/xapi/auth_signature.ml b/ocaml/xapi/auth_signature.ml index 8c98f1d95b8..28999179873 100644 --- a/ocaml/xapi/auth_signature.ml +++ b/ocaml/xapi/auth_signature.ml @@ -22,7 +22,7 @@ *) exception Auth_failure of string -type auth_service_error_tag = E_GENERIC|E_LOOKUP|E_DENIED|E_CREDENTIALS|E_UNAVAILABLE|E_INVALID_OU|E_INVALID_ACCOUNT +type auth_service_error_tag = E_GENERIC|E_LOOKUP|E_DENIED|E_CREDENTIALS|E_UNAVAILABLE|E_INVALID_OU exception Auth_service_error of auth_service_error_tag * string exception Subject_cannot_be_resolved @@ -34,7 +34,6 @@ let suffix_of_tag errtag = | E_CREDENTIALS -> Api_errors.auth_suffix_wrong_credentials | E_UNAVAILABLE -> Api_errors.auth_suffix_unavailable | E_INVALID_OU -> Api_errors.auth_suffix_invalid_ou - | E_INVALID_ACCOUNT -> Api_errors.auth_suffix_invalid_account (* required 
fields in subject.other_config *) let subject_information_field_subject_name = "subject-name" diff --git a/ocaml/xapi/cli_frontend.ml b/ocaml/xapi/cli_frontend.ml index a9556e33cb1..387737890ba 100644 --- a/ocaml/xapi/cli_frontend.ml +++ b/ocaml/xapi/cli_frontend.ml @@ -855,7 +855,7 @@ let rec cmdtable_data : (string*cmd_spec) list = { reqd=["file-name"]; optn=["sr-uuid"]; - help="Stream new update to the server. The update will be uploaded to the SR , or, if it is not specified, to the pool's default SR."; + help="Stream new update to the server."; implementation=With_fd Cli_operations.update_upload; flags=[]; }; diff --git a/ocaml/xapi/cli_operations.ml b/ocaml/xapi/cli_operations.ml index 7e0bc0994a4..4672d0df008 100644 --- a/ocaml/xapi/cli_operations.ml +++ b/ocaml/xapi/cli_operations.ml @@ -678,7 +678,6 @@ let make_param_funs getall getallrecs getbyuuid record class_name def_filters de try set v with - (* XXX: -- warning 52 -- this might break with new ocaml compilers *) (Failure "int_of_string") -> failwith ("Parameter "^k^" must be an integer") | (Failure "float_of_string") -> failwith ("Parameter "^k^" must be a floating-point number") | (Invalid_argument "bool_of_string") -> failwith ("Parameter "^k^" must be a boolean (true or false)") @@ -1840,7 +1839,7 @@ let select_vms ?(include_control_vms = false) ?(include_template_vms = false) rp let params = if not include_template_vms then ("is-a-template" , "false") :: params else params in let vm_name_or_ref = try Some ( (* Escape every quote character *) - List.assoc "vm" params |> String.replace "\"" "\\\"" + List.assoc "vm" params |> Stdext.Xstringext.String.replace "\"" "\\\"" ) with _ -> None in let params, where_clause = match vm_name_or_ref with | None -> params, "true" @@ -2403,7 +2402,11 @@ let vm_install_real printer rpc session_id template name description params = let suspend_sr_ref = match sr_ref with | Some sr -> - if Cli_util.is_valid_ref session_id sr then + let ref_is_valid = 
Server_helpers.exec_with_new_task + ~session_id "Checking suspend_SR validity" + (fun __context -> Db.is_valid_ref __context sr) + in + if ref_is_valid then (* sr-uuid and/or sr-name-label was specified - use this as the suspend_SR *) sr else @@ -4337,19 +4340,12 @@ let update_upload fd printer rpc session_id params = let sr = if List.mem_assoc "sr-uuid" params then Client.SR.get_by_uuid rpc session_id (List.assoc "sr-uuid" params) - else begin - let sr = Client.Pool.get_default_SR ~rpc ~session_id ~self:(List.hd pools) in - if Cli_util.is_valid_ref session_id sr then sr - else failwith "No sr-uuid parameter was given, and the pool's default SR \ - is unspecified or invalid. Please explicitly specify the SR to use \ - in the sr-uuid parameter, or set the pool's default SR." - end - in + else Client.Pool.get_default_SR ~rpc ~session_id ~self:(List.hd pools) + in let uri = Printf.sprintf "%s%s?session_id=%s&sr_id=%s&task_id=%s" prefix Constants.import_raw_vdi_uri (Ref.string_of session_id) (Ref.string_of sr)(Ref.string_of task_id) in let _ = debug "trying to post patch to uri:%s" uri in - HttpPut (filename, uri) - in + HttpPut (filename, uri) in let result = track_http_operation fd rpc session_id make_command "host patch upload" in let vdi_ref = API.Legacy.From.ref_VDI "" (Xml.parse_string result) in let update_ref = diff --git a/ocaml/xapi/cli_util.ml b/ocaml/xapi/cli_util.ml index fedab52002e..1058373da6d 100644 --- a/ocaml/xapi/cli_util.ml +++ b/ocaml/xapi/cli_util.ml @@ -178,10 +178,6 @@ let ref_convert x = | Some ir -> ir.Ref_index.uuid^(match ir.Ref_index.name_label with None->"" | Some x -> " ("^x^")") -let is_valid_ref session_id ref = - Server_helpers.exec_with_new_task - ~session_id "Checking validity of reference" - (fun __context -> Db.is_valid_ref __context ref) (* Marshal an API-style server-error *) let get_server_error code params = diff --git a/ocaml/xapi/db_gc.ml b/ocaml/xapi/db_gc.ml index 3954658b646..c47b95e3c47 100644 --- a/ocaml/xapi/db_gc.ml 
+++ b/ocaml/xapi/db_gc.ml @@ -37,6 +37,187 @@ let host_table_m = Mutex.create () let _time = "time" let _shutting_down = "shutting-down" +let valid_ref x = Db.is_valid_ref x + +let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_record = + let db = Context.database_of __context in + let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in + let all_refs = get_all ~__context in + let do_gc ref = + let print_valid b = if b then "valid" else "INVALID" in + let record = get_record ~__context ~self:ref in + let ref_1_valid = valid_ref1 record in + let ref_2_valid = valid_ref2 record in + + if not (ref_1_valid && ref_2_valid) then + begin + let table,reference,valid1,valid2 = + (match DB.get_table_from_ref db (Ref.string_of ref) with + None -> "UNKNOWN CLASS" + | Some c -> c), + (Ref.string_of ref), + (print_valid ref_1_valid), + (print_valid ref_2_valid) in + debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting GC..." table reference valid1 valid2; + delete_record ~__context ~self:ref + end in + List.iter do_gc all_refs + +let gc_VGPU_types ~__context = + (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types + * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) + let open Db_filter_types in + let garbage = Db.VGPU_type.get_records_where ~__context + ~expr:(And ((Eq (Field "VGPUs", Literal "()")), + (Eq (Field "supported_on_PGPUs", Literal "()")))) in + match garbage with + | [] -> () + | _ -> + debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" + (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); + List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage + +let gc_PVS_proxies ~__context = + gc_connector ~__context + Db.PVS_proxy.get_all + Db.PVS_proxy.get_record + (fun x -> valid_ref __context x.pVS_proxy_VIF) + (fun x -> valid_ref __context x.pVS_proxy_site) + Db.PVS_proxy.destroy + +(* A PVS server refers to a PVS 
site. We delete it, if the reference + * becomes invalid. At creation, the server is connected to a site and + * hence we never GC a server right after it was created. *) +let gc_PVS_servers ~__context = + gc_connector ~__context + Db.PVS_server.get_all + Db.PVS_server.get_record + (fun x -> true) + (fun x -> valid_ref __context x.pVS_server_site) + Db.PVS_server.destroy + +let gc_PVS_cache_storage ~__context = + gc_connector ~__context + Db.PVS_cache_storage.get_all + Db.PVS_cache_storage.get_record + (fun x -> valid_ref __context x.pVS_cache_storage_site) + (fun x -> valid_ref __context x.pVS_cache_storage_host) + Db.PVS_cache_storage.destroy + +let gc_PIFs ~__context = + gc_connector ~__context Db.PIF.get_all Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) + (fun ~__context ~self -> + (* We need to destroy the PIF, it's metrics and any VLAN/bond records that this PIF was a master of. *) + (* bonds/tunnels_to_gc is actually a list which is either empty (not part of a bond/tunnel) + * or containing exactly one reference.. 
*) + let bonds_to_gc = Db.PIF.get_bond_master_of ~__context ~self in + let vlan_to_gc = Db.PIF.get_VLAN_master_of ~__context ~self in + let tunnels_to_gc = Db.PIF.get_tunnel_access_PIF_of ~__context ~self in + (* Only destroy PIF_metrics of physical or bond PIFs *) + if vlan_to_gc = Ref.null && tunnels_to_gc = [] then begin + let metrics = Db.PIF.get_metrics ~__context ~self in + (try Db.PIF_metrics.destroy ~__context ~self:metrics with _ -> ()) + end; + (try Db.VLAN.destroy ~__context ~self:vlan_to_gc with _ -> ()); + List.iter (fun tunnel -> (try Db.Tunnel.destroy ~__context ~self:tunnel with _ -> ())) tunnels_to_gc; + List.iter (fun bond -> (try Db.Bond.destroy ~__context ~self:bond with _ -> ())) bonds_to_gc; + Db.PIF.destroy ~__context ~self) +let gc_VBDs ~__context = + gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) + (fun ~__context ~self -> + (* When GCing VBDs that are CDs, set them to empty rather than destroy them entirely *) + if (valid_ref __context (Db.VBD.get_VM ~__context ~self)) && (Db.VBD.get_type ~__context ~self = `CD) then + begin + Db.VBD.set_VDI ~__context ~self ~value:Ref.null; + Db.VBD.set_empty ~__context ~self ~value:true; + debug "VBD corresponds to CD. 
Record preserved but set to empty"; + end + else + begin + let metrics = Db.VBD.get_metrics ~__context ~self in + (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); + Db.VBD.destroy ~__context ~self; + end) + +let gc_crashdumps ~__context = + gc_connector ~__context Db.Crashdump.get_all Db.Crashdump.get_record + (fun x->valid_ref __context x.crashdump_VM) (fun x->valid_ref __context x.crashdump_VDI) Db.Crashdump.destroy +let gc_VIFs ~__context = + gc_connector ~__context Db.VIF.get_all Db.VIF.get_record (fun x->valid_ref __context x.vIF_VM) (fun x->valid_ref __context x.vIF_network) + (fun ~__context ~self -> + let metrics = Db.VIF.get_metrics ~__context ~self in + (try Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); + Db.VIF.destroy ~__context ~self) +let gc_VGPUs ~__context = + gc_connector ~__context Db.VGPU.get_all Db.VGPU.get_record (fun x->valid_ref __context x.vGPU_VM) (fun x->valid_ref __context x.vGPU_GPU_group) + (fun ~__context ~self -> + Db.VGPU.destroy ~__context ~self) + +let gc_PGPUs ~__context = + let pgpus = Db.PGPU.get_all ~__context in + (* Go through the list of PGPUs, destroying any with an invalid host ref. + * Keep a list of groups which contained PGPUs which were destroyed. *) + let affected_groups = + List.fold_left + (fun acc pgpu -> + if not (valid_ref __context (Db.PGPU.get_host ~__context ~self:pgpu)) + then begin + let group = Db.PGPU.get_GPU_group ~__context ~self:pgpu in + Db.PGPU.destroy ~__context ~self:pgpu; + debug "GCed PGPU %s" (Ref.string_of pgpu); + group :: acc + end else + acc) + [] pgpus + |> List.filter (valid_ref __context) + |> List.setify + in + (* Update enabled/supported VGPU types on the groups which contained the + * destroyed PGPUs. 
*) + List.iter + (fun group -> + Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group; + Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) + affected_groups + +let gc_PBDs ~__context = + gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy +let gc_Host_patches ~__context = + gc_connector ~__context Db.Host_patch.get_all Db.Host_patch.get_record (fun x->valid_ref __context x.host_patch_host) (fun x->valid_ref __context x.host_patch_pool_patch) Db.Host_patch.destroy +let gc_host_cpus ~__context = + let host_cpus = Db.Host_cpu.get_all ~__context in + List.iter + (fun hcpu -> + if not (valid_ref __context (Db.Host_cpu.get_host ~__context ~self:hcpu)) then + Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus +let gc_host_metrics ~__context = + let all_host_metrics = Db.Host_metrics.get_all ~__context in + let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in + let host_metrics = metrics (Db.Host.get_all ~__context) in + List.iter + (fun hmetric-> + if not (List.mem hmetric host_metrics) then + Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics + +(* If the SR record is missing, delete the VDI record *) +let gc_VDIs ~__context = + let all_srs = Db.SR.get_all ~__context in + List.iter (fun vdi -> + let sr = Db.VDI.get_SR ~__context ~self:vdi in + if not(List.mem sr all_srs) then begin + debug "GCed VDI %s" (Ref.string_of vdi); + Db.VDI.destroy ~__context ~self:vdi + end) (Db.VDI.get_all ~__context) + +let gc_consoles ~__context = + List.iter (fun console -> + if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) + then begin + Db.Console.destroy ~__context ~self:console; + debug "GCed console %s" (Ref.string_of console); + end + ) (Db.Console.get_all ~__context) + let already_sent_clock_skew_warnings = Hashtbl.create 10 let detect_clock_skew ~__context host skew = @@ -116,6 +297,186 @@ 
let check_host_liveness ~__context = let all_hosts = Db.Host.get_all ~__context in List.iter check_host all_hosts +let timeout_sessions_common ~__context sessions limit session_group = + let unused_sessions = List.filter + (fun (x, _) -> + let rec is_session_unused s = + if (s=Ref.null) then true (* top of session tree *) + else + try (* if no session s, assume default value true=unused *) + let tasks = (Db.Session.get_tasks ~__context ~self:s) in + let parent = (Db.Session.get_parent ~__context ~self:s) in + (List.for_all + (fun t -> TaskHelper.status_is_completed + (* task might not exist anymore, assume completed in this case *) + (try Db.Task.get_status ~__context ~self:t with _->`success) + ) + tasks + ) + && (is_session_unused parent) + with _->true + in is_session_unused x + ) + sessions + in + (* Only keep a list of (ref, last_active, uuid) *) + let disposable_sessions = List.map (fun (x, y) -> x, Date.to_float y.Db_actions.session_last_active, y.Db_actions.session_uuid) unused_sessions in + (* Definitely invalidate sessions last used long ago *) + let threshold_time = Unix.time () -. 
!Xapi_globs.inactive_session_timeout in + let young, old = List.partition (fun (_, y, _) -> y > threshold_time) disposable_sessions in + (* If there are too many young sessions then we need to delete the oldest *) + let lucky, unlucky = + if List.length young <= limit + then young, [] (* keep them all *) + else + (* Need to reverse sort by last active and drop the oldest *) + List.chop limit (List.sort (fun (_,a, _) (_,b, _) -> compare b a) young) in + let cancel doc sessions = + List.iter + (fun (s, active, uuid) -> + debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) (Date.to_string (Date.of_float active)) doc; + Xapi_session.destroy_db_session ~__context ~self:s + ) sessions in + (* Only the 'lucky' survive: the 'old' and 'unlucky' are destroyed *) + if unlucky <> [] + then debug "Number of disposable sessions in group '%s' in database (%d/%d) exceeds limit (%d): will delete the oldest" session_group (List.length disposable_sessions) (List.length sessions) limit; + cancel (Printf.sprintf "Timed out session in group '%s' because of its age" session_group) old; + cancel (Printf.sprintf "Timed out session in group '%s' because max number of sessions was exceeded" session_group) unlucky + +let last_session_log_time = ref None + +let timeout_sessions ~__context = + let all_sessions = Db.Session.get_internal_records_where ~__context ~expr:Db_filter_types.True in + + let pool_sessions, nonpool_sessions = List.partition (fun (_, s) -> s.Db_actions.session_pool) all_sessions in + let use_root_auth_name s = s.Db_actions.session_auth_user_name = "" || s.Db_actions.session_auth_user_name = "root" in + let anon_sessions, named_sessions = List.partition (fun (_, s) -> s.Db_actions.session_originator = "" && use_root_auth_name s) nonpool_sessions in + let session_groups = Hashtbl.create 37 in + List.iter (function (_, s) as rs -> + let key = if use_root_auth_name s then `Orig 
s.Db_actions.session_originator else `Name s.Db_actions.session_auth_user_name in + let current_sessions = + try Hashtbl.find session_groups key + with Not_found -> [] in + Hashtbl.replace session_groups key (rs :: current_sessions) + ) named_sessions; + + let should_log = match !last_session_log_time with + | None -> true + | Some t -> Unix.time () -. t > 600.0 (* Every 10 mins, dump session stats *) + in + + if should_log then begin + last_session_log_time := Some (Unix.time ()); + let nbindings = Hashtbl.fold (fun _ _ acc -> 1+acc) session_groups 0 in + debug "session_log: active_sessions=%d (%d pool, %d anon, %d named - %d groups)" + (List.length all_sessions) (List.length pool_sessions) (List.length anon_sessions) (List.length named_sessions) nbindings + end; + + begin + Hashtbl.iter + (fun key ss -> match key with + | `Orig orig -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_originator ("originator:"^orig) + | `Name name -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_user_name ("username:"^name)) + session_groups; + timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; + timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; + end + +let probation_pending_tasks = Hashtbl.create 53 + +let timeout_tasks ~__context = + let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in + let oldest_completed_time = Unix.time() -. !Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in + let oldest_pending_time = Unix.time() -. !Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in + + let completed, pending = + List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) + all_tasks in + + (* Any task that was incomplete at the point someone called Task.destroy + will have `destroy in its current_operations. 
If they're now complete, + we can Kill these immediately *) + let completed_destroyable, completed_gcable = + List.partition + (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) + completed in + + List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; + + let completed_old, completed_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_finished < oldest_completed_time) + completed_gcable in + + let pending_old, pending_young = + List.partition + (fun (_, t) -> + Date.to_float t.Db_actions.task_created < oldest_pending_time) + pending in + + let pending_old_run, pending_old_hung = + List.partition + (fun (_, t) -> + try + let pre_progress = + Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in + t.Db_actions.task_progress -. pre_progress > min_float + with Not_found -> true) + pending_old in + + let () = + Hashtbl.clear probation_pending_tasks; + List.iter + (fun (_, t) -> + Hashtbl.add probation_pending_tasks + t.Db_actions.task_uuid t.Db_actions.task_progress) + pending_old in + + let old = pending_old_hung @ completed_old in + let young = pending_old_run @ pending_young @ completed_young in + + (* If there are still too many young tasks then we'll try to delete some completed ones *) + let lucky, unlucky = + if List.length young <= Xapi_globs.max_tasks + then young, [] (* keep them all *) + else + (* Compute how many we'd like to delete *) + let overflow = List.length young - Xapi_globs.max_tasks in + (* We only consider deleting completed tasks *) + let completed, pending = List.partition + (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in + (* Sort the completed tasks so we delete oldest tasks in preference *) + let completed = + List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in + (* From the completes set, choose up to 'overflow' *) + 
let unlucky, lucky = + if List.length completed > overflow + then List.chop overflow completed + else completed, [] in (* not enough to delete, oh well *) + (* Keep all pending and any which were not chosen from the completed set *) + pending @ lucky, unlucky in + (* Cancel the 'old' and 'unlucky' *) + List.iter (fun (x, y) -> + if not (TaskHelper.status_is_completed y.Db_actions.task_status) + then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; + TaskHelper.destroy ~__context x + ) (old @ unlucky); + if List.length lucky > Xapi_globs.max_tasks + then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks + +(* +let timeout_alerts ~__context = + let all_alerts = Db.Alert.get_all ~__context in + let now = Unix.gettimeofday() in + List.iter (fun alert -> + let alert_time = Date.to_float (Db.Alert.get_timestamp ~__context ~self:alert) in + if now -. alert_time > Xapi_globs.alert_timeout then + Db.Alert.destroy ~__context ~self:alert + ) all_alerts +*) + (* Compare this host's (the master's) version with that reported by all other hosts and mark the Pool with an other_config key if we are in a rolling upgrade mode. If we detect the beginning or end of a rolling upgrade, call out to an external script. 
*) @@ -198,6 +559,9 @@ let tickle_heartbeat ~__context host stuff = ); [] +let gc_messages ~__context = + Xapi_message.gc ~__context + let single_pass () = Server_helpers.exec_with_new_task "DB GC" (fun __context -> @@ -207,7 +571,32 @@ let single_pass () = Stats.time_this (Printf.sprintf "Db_gc: %s" name) (fun () -> f ~__context) in - List.iter time_one Db_gc_util.gc_subtask_list + (* do VDIs first because this will *) + (* cause some VBDs to be affected *) + List.iter time_one [ + "VDIs", gc_VDIs; + "PIFs", gc_PIFs; + "VBDs", gc_VBDs; + "crashdumps", gc_crashdumps; + "VIFs", gc_VIFs; + "PBDs", gc_PBDs; + "VGPUs", gc_VGPUs; + "PGPUs", gc_PGPUs; + "VGPU_types", gc_VGPU_types; + "Host patches", gc_Host_patches; + "Host CPUs", gc_host_cpus; + "Host metrics", gc_host_metrics; + "Tasks", timeout_tasks; + "Sessions", timeout_sessions; + "Messages", gc_messages; + "Consoles", gc_consoles; + "PVS proxies", gc_PVS_proxies; + "PVS servers", gc_PVS_servers; + "PVS cache storage", gc_PVS_cache_storage; + (* timeout_alerts; *) + (* CA-29253: wake up all blocked clients *) + "Heartbeat", Xapi_event.heartbeat; + ] ); Mutex.execute use_host_heartbeat_for_liveness_m (fun () -> diff --git a/ocaml/xapi/db_gc_util.ml b/ocaml/xapi/db_gc_util.ml deleted file mode 100644 index 6d99e8c7356..00000000000 --- a/ocaml/xapi/db_gc_util.ml +++ /dev/null @@ -1,421 +0,0 @@ -(* - * Copyright (C) 2006-2017 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- *) -(** - * @group Database Operations -*) - -open API -open Stdext -open Listext - -module D=Debug.Make(struct let name="db_gc_util" end) -open D - -let valid_ref x = Db.is_valid_ref x - -let gc_connector ~__context get_all get_record valid_ref1 valid_ref2 delete_record = - let db = Context.database_of __context in - let module DB = (val (Db_cache.get db) : Db_interface.DB_ACCESS) in - let all_refs = get_all ~__context in - let do_gc ref = - let print_valid b = if b then "valid" else "INVALID" in - let record = get_record ~__context ~self:ref in - let ref_1_valid = valid_ref1 record in - let ref_2_valid = valid_ref2 record in - - if not (ref_1_valid && ref_2_valid) then - begin - let table,reference,valid1,valid2 = - (match DB.get_table_from_ref db (Ref.string_of ref) with - None -> "UNKNOWN CLASS" - | Some c -> c), - (Ref.string_of ref), - (print_valid ref_1_valid), - (print_valid ref_2_valid) in - debug "Connector %s (%s) has invalid refs [ref_1: %s; ref_2: %s]. Attempting to GC..." table reference valid1 valid2; - delete_record ~__context ~self:ref - end in - List.iter do_gc all_refs - -(* If the SR record is missing, delete the VDI record *) -let gc_VDIs ~__context = - let all_srs = Db.SR.get_all ~__context in - List.iter (fun vdi -> - let sr = Db.VDI.get_SR ~__context ~self:vdi in - if not(List.mem sr all_srs) then begin - debug "GCed VDI %s" (Ref.string_of vdi); - Db.VDI.destroy ~__context ~self:vdi - end) (Db.VDI.get_all ~__context) - -let gc_PIFs ~__context = - gc_connector ~__context Db.PIF.get_all Db.PIF.get_record (fun x->valid_ref __context x.pIF_host) (fun x->valid_ref __context x.pIF_network) - (fun ~__context ~self -> - (* We need to destroy the PIF, it's metrics and any VLAN/bond records that this PIF was a master of. *) - (* bonds/tunnels_to_gc is actually a list which is either empty (not part of a bond/tunnel) - * or containing exactly one reference.. 
*) - let bonds_to_gc = Db.PIF.get_bond_master_of ~__context ~self in - let vlan_to_gc = Db.PIF.get_VLAN_master_of ~__context ~self in - let tunnels_to_gc = Db.PIF.get_tunnel_access_PIF_of ~__context ~self in - (* Only destroy PIF_metrics of physical or bond PIFs *) - if vlan_to_gc = Ref.null && tunnels_to_gc = [] then begin - let metrics = Db.PIF.get_metrics ~__context ~self in - (try Db.PIF_metrics.destroy ~__context ~self:metrics with _ -> ()) - end; - (try Db.VLAN.destroy ~__context ~self:vlan_to_gc with _ -> ()); - List.iter (fun tunnel -> (try Db.Tunnel.destroy ~__context ~self:tunnel with _ -> ())) tunnels_to_gc; - List.iter (fun bond -> (try Db.Bond.destroy ~__context ~self:bond with _ -> ())) bonds_to_gc; - Db.PIF.destroy ~__context ~self) - -let gc_VBDs ~__context = - gc_connector ~__context Db.VBD.get_all Db.VBD.get_record (fun x->valid_ref __context x.vBD_VM) (fun x->valid_ref __context x.vBD_VDI || x.vBD_empty) - (fun ~__context ~self -> - (* When GCing VBDs that are CDs, set them to empty rather than destroy them entirely *) - if (valid_ref __context (Db.VBD.get_VM ~__context ~self)) && (Db.VBD.get_type ~__context ~self = `CD) then - begin - Db.VBD.set_VDI ~__context ~self ~value:Ref.null; - Db.VBD.set_empty ~__context ~self ~value:true; - debug "VBD corresponds to CD. 
Record preserved but set to empty"; - end - else - begin - let metrics = Db.VBD.get_metrics ~__context ~self in - (try Db.VBD_metrics.destroy ~__context ~self:metrics with _ -> ()); - Db.VBD.destroy ~__context ~self; - end) - -let gc_crashdumps ~__context = - gc_connector ~__context Db.Crashdump.get_all Db.Crashdump.get_record - (fun x->valid_ref __context x.crashdump_VM) (fun x->valid_ref __context x.crashdump_VDI) Db.Crashdump.destroy - -let gc_VIFs ~__context = - gc_connector ~__context Db.VIF.get_all Db.VIF.get_record (fun x->valid_ref __context x.vIF_VM) (fun x->valid_ref __context x.vIF_network) - (fun ~__context ~self -> - let metrics = Db.VIF.get_metrics ~__context ~self in - (try Db.VIF_metrics.destroy ~__context ~self:metrics with _ -> ()); - Db.VIF.destroy ~__context ~self) - -let gc_PBDs ~__context = - gc_connector ~__context Db.PBD.get_all Db.PBD.get_record (fun x->valid_ref __context x.pBD_host) (fun x->valid_ref __context x.pBD_SR) Db.PBD.destroy - -let gc_VGPUs ~__context = - gc_connector ~__context Db.VGPU.get_all Db.VGPU.get_record (fun x->valid_ref __context x.vGPU_VM) (fun x->valid_ref __context x.vGPU_GPU_group) - (fun ~__context ~self -> - Db.VGPU.destroy ~__context ~self) - -let gc_PGPUs ~__context = - let pgpus = Db.PGPU.get_all ~__context in - (* Go through the list of PGPUs, destroying any with an invalid host ref. - * Keep a list of groups which contained PGPUs which were destroyed. *) - let affected_groups = - List.fold_left - (fun acc pgpu -> - if not (valid_ref __context (Db.PGPU.get_host ~__context ~self:pgpu)) - then begin - let group = Db.PGPU.get_GPU_group ~__context ~self:pgpu in - Db.PGPU.destroy ~__context ~self:pgpu; - debug "GCed PGPU %s" (Ref.string_of pgpu); - group :: acc - end else - acc) - [] pgpus - |> List.filter (valid_ref __context) - |> List.setify - in - (* Update enabled/supported VGPU types on the groups which contained the - * destroyed PGPUs. 
*) - List.iter - (fun group -> - Xapi_gpu_group.update_enabled_VGPU_types ~__context ~self:group; - Xapi_gpu_group.update_supported_VGPU_types ~__context ~self:group) - affected_groups - -let gc_VGPU_types ~__context = - (* We delete a VGPU_type iff it does not appear in the supported_VGPU_types - * of any PGPU _and_ there doesn't exist a VGPU with this VGPU_type *) - let open Db_filter_types in - let garbage = Db.VGPU_type.get_records_where ~__context - ~expr:(And ((Eq (Field "VGPUs", Literal "()")), - (Eq (Field "supported_on_PGPUs", Literal "()")))) in - match garbage with - | [] -> () - | _ -> - debug "GC-ing the following unused and unsupported VGPU_types: [ %s ]" - (String.concat "; " (List.map Ref.string_of (List.map fst garbage))); - List.iter (fun (self, _) -> Db.VGPU_type.destroy ~__context ~self) garbage - -let gc_Host_patches ~__context = - gc_connector ~__context Db.Host_patch.get_all Db.Host_patch.get_record (fun x->valid_ref __context x.host_patch_host) (fun x->valid_ref __context x.host_patch_pool_patch) Db.Host_patch.destroy - -let gc_host_cpus ~__context = - let host_cpus = Db.Host_cpu.get_all ~__context in - List.iter - (fun hcpu -> - if not (valid_ref __context (Db.Host_cpu.get_host ~__context ~self:hcpu)) then - Db.Host_cpu.destroy ~__context ~self:hcpu) host_cpus - -let gc_host_metrics ~__context = - let all_host_metrics = Db.Host_metrics.get_all ~__context in - let metrics = List.map (fun host-> Db.Host.get_metrics ~__context ~self:host) in - let host_metrics = metrics (Db.Host.get_all ~__context) in - List.iter - (fun hmetric-> - if not (List.mem hmetric host_metrics) then - Db.Host_metrics.destroy ~__context ~self:hmetric) all_host_metrics - -let probation_pending_tasks = Hashtbl.create 53 - -let timeout_tasks ~__context = - let all_tasks = Db.Task.get_internal_records_where ~__context ~expr:Db_filter_types.True in - let oldest_completed_time = Unix.time() -. 
!Xapi_globs.completed_task_timeout (* time out completed tasks after 65 minutes *) in - let oldest_pending_time = Unix.time() -. !Xapi_globs.pending_task_timeout (* time out pending tasks after 24 hours *) in - - let completed, pending = - List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) - all_tasks in - - (* Any task that was incomplete at the point someone called Task.destroy - will have `destroy in its current_operations. If they're now complete, - we can Kill these immediately *) - let completed_destroyable, completed_gcable = - List.partition - (fun (_, t) -> List.exists (fun (_,op) -> op = `destroy) t.Db_actions.task_current_operations) - completed in - - List.iter (fun (t, _) -> Db.Task.destroy ~__context ~self:t) completed_destroyable; - - let completed_old, completed_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_finished < oldest_completed_time) - completed_gcable in - - let pending_old, pending_young = - List.partition - (fun (_, t) -> - Date.to_float t.Db_actions.task_created < oldest_pending_time) - pending in - - let pending_old_run, pending_old_hung = - List.partition - (fun (_, t) -> - try - let pre_progress = - Hashtbl.find probation_pending_tasks t.Db_actions.task_uuid in - t.Db_actions.task_progress -. 
pre_progress > min_float - with Not_found -> true) - pending_old in - - let () = - Hashtbl.clear probation_pending_tasks; - List.iter - (fun (_, t) -> - Hashtbl.add probation_pending_tasks - t.Db_actions.task_uuid t.Db_actions.task_progress) - pending_old in - - let old = pending_old_hung @ completed_old in - let young = pending_old_run @ pending_young @ completed_young in - - (* If there are still too many young tasks then we'll try to delete some completed ones *) - let lucky, unlucky = - if List.length young <= Xapi_globs.max_tasks - then young, [] (* keep them all *) - else - (* Compute how many we'd like to delete *) - let overflow = List.length young - Xapi_globs.max_tasks in - (* We only consider deleting completed tasks *) - let completed, pending = List.partition - (fun (_, t) -> TaskHelper.status_is_completed t.Db_actions.task_status) young in - (* Sort the completed tasks so we delete oldest tasks in preference *) - let completed = - List.sort (fun (_,t1) (_,t2) -> compare (Date.to_float t1.Db_actions.task_finished) (Date.to_float t2.Db_actions.task_finished)) completed in - (* From the completes set, choose up to 'overflow' *) - let unlucky, lucky = - if List.length completed > overflow - then List.chop overflow completed - else completed, [] in (* not enough to delete, oh well *) - (* Keep all pending and any which were not chosen from the completed set *) - pending @ lucky, unlucky in - (* Cancel the 'old' and 'unlucky' *) - List.iter (fun (x, y) -> - if not (TaskHelper.status_is_completed y.Db_actions.task_status) - then warn "GCed old task that was still in pending state: %s" y.Db_actions.task_uuid; - TaskHelper.destroy ~__context x - ) (old @ unlucky); - if List.length lucky > Xapi_globs.max_tasks - then warn "There are more pending tasks than the maximum allowed: %d > %d" (List.length lucky) Xapi_globs.max_tasks - - -let timeout_sessions_common ~__context sessions limit session_group = - let unused_sessions = List.filter - (fun (x, _) -> - let rec 
is_session_unused s = - if (s=Ref.null) then true (* top of session tree *) - else - try (* if no session s, assume default value true=unused *) - let tasks = (Db.Session.get_tasks ~__context ~self:s) in - let parent = (Db.Session.get_parent ~__context ~self:s) in - (List.for_all - (fun t -> TaskHelper.status_is_completed - (* task might not exist anymore, assume completed in this case *) - (try Db.Task.get_status ~__context ~self:t with _->`success) - ) - tasks - ) - && (is_session_unused parent) - with _->true - in is_session_unused x - ) - sessions - in - (* Only keep a list of (ref, last_active, uuid) *) - let disposable_sessions = List.map (fun (x, y) -> x, Date.to_float y.Db_actions.session_last_active, y.Db_actions.session_uuid) unused_sessions in - (* Definitely invalidate sessions last used long ago *) - let threshold_time = Unix.time () -. !Xapi_globs.inactive_session_timeout in - let young, old = List.partition (fun (_, y, _) -> y > threshold_time) disposable_sessions in - (* If there are too many young sessions then we need to delete the oldest *) - let lucky, unlucky = - if List.length young <= limit - then young, [] (* keep them all *) - else - (* Need to reverse sort by last active and drop the oldest *) - List.chop limit (List.sort (fun (_,a, _) (_,b, _) -> compare b a) young) in - let cancel doc sessions = - List.iter - (fun (s, active, uuid) -> - debug "Session.destroy _ref=%s uuid=%s %s (last active %s): %s" (Ref.string_of s) uuid (Context.trackid_of_session (Some s)) (Date.to_string (Date.of_float active)) doc; - Xapi_session.destroy_db_session ~__context ~self:s - ) sessions in - (* Only the 'lucky' survive: the 'old' and 'unlucky' are destroyed *) - if unlucky <> [] - then debug "Number of disposable sessions in group '%s' in database (%d/%d) exceeds limit (%d): will delete the oldest" session_group (List.length disposable_sessions) (List.length sessions) limit; - cancel (Printf.sprintf "Timed out session in group '%s' because of its age" 
session_group) old; - cancel (Printf.sprintf "Timed out session in group '%s' because max number of sessions was exceeded" session_group) unlucky - -let last_session_log_time = ref None - -let timeout_sessions ~__context = - let all_sessions = Db.Session.get_internal_records_where ~__context ~expr:Db_filter_types.True in - - let pool_sessions, nonpool_sessions = List.partition (fun (_, s) -> s.Db_actions.session_pool) all_sessions in - let use_root_auth_name s = s.Db_actions.session_auth_user_name = "" || s.Db_actions.session_auth_user_name = "root" in - let anon_sessions, named_sessions = List.partition (fun (_, s) -> s.Db_actions.session_originator = "" && use_root_auth_name s) nonpool_sessions in - let session_groups = Hashtbl.create 37 in - List.iter (function (_, s) as rs -> - let key = if use_root_auth_name s then `Orig s.Db_actions.session_originator else `Name s.Db_actions.session_auth_user_name in - let current_sessions = - try Hashtbl.find session_groups key - with Not_found -> [] in - Hashtbl.replace session_groups key (rs :: current_sessions) - ) named_sessions; - - let should_log = match !last_session_log_time with - | None -> true - | Some t -> Unix.time () -. 
t > 600.0 (* Every 10 mins, dump session stats *) - in - - if should_log then begin - last_session_log_time := Some (Unix.time ()); - let nbindings = Hashtbl.fold (fun _ _ acc -> 1+acc) session_groups 0 in - debug "session_log: active_sessions=%d (%d pool, %d anon, %d named - %d groups)" - (List.length all_sessions) (List.length pool_sessions) (List.length anon_sessions) (List.length named_sessions) nbindings - end; - - begin - Hashtbl.iter - (fun key ss -> match key with - | `Orig orig -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_originator ("originator:"^orig) - | `Name name -> timeout_sessions_common ~__context ss Xapi_globs.max_sessions_per_user_name ("username:"^name)) - session_groups; - timeout_sessions_common ~__context anon_sessions Xapi_globs.max_sessions "external"; - timeout_sessions_common ~__context pool_sessions Xapi_globs.max_sessions "internal"; - end - -let gc_messages ~__context = - Xapi_message.gc ~__context - -let gc_consoles ~__context = - List.iter (fun console -> - if not (valid_ref __context (Db.Console.get_VM ~__context ~self:console)) - then begin - Db.Console.destroy ~__context ~self:console; - debug "GCed console %s" (Ref.string_of console); - end - ) (Db.Console.get_all ~__context) - -let gc_PVS_proxies ~__context = - gc_connector ~__context - Db.PVS_proxy.get_all - Db.PVS_proxy.get_record - (fun x -> valid_ref __context x.pVS_proxy_VIF) - (fun x -> valid_ref __context x.pVS_proxy_site) - Db.PVS_proxy.destroy - -(* A PVS server refers to a PVS site. We delete it, if the reference - * becomes invalid. At creation, the server is connected to a site and - * hence we never GC a server right after it was created. 
*) -let gc_PVS_servers ~__context = - gc_connector ~__context - Db.PVS_server.get_all - Db.PVS_server.get_record - (fun x -> true) - (fun x -> valid_ref __context x.pVS_server_site) - Db.PVS_server.destroy - -let gc_PVS_cache_storage ~__context = - gc_connector ~__context - Db.PVS_cache_storage.get_all - Db.PVS_cache_storage.get_record - (fun x -> valid_ref __context x.pVS_cache_storage_site) - (fun x -> valid_ref __context x.pVS_cache_storage_host) - Db.PVS_cache_storage.destroy - -(* -let timeout_alerts ~__context = - let all_alerts = Db.Alert.get_all ~__context in - let now = Unix.gettimeofday() in - List.iter (fun alert -> - let alert_time = Date.to_float (Db.Alert.get_timestamp ~__context ~self:alert) in - if now -. alert_time > Xapi_globs.alert_timeout then - Db.Alert.destroy ~__context ~self:alert - ) all_alerts -*) - - -(* do VDIs first because this will cause some VBDs to be affected *) -let gc_subtask_list = [ - "VDIs", gc_VDIs; - "PIFs", gc_PIFs; - "VBDs", gc_VBDs; - "crashdumps", gc_crashdumps; - "VIFs", gc_VIFs; - "PBDs", gc_PBDs; - "VGPUs", gc_VGPUs; - "PGPUs", gc_PGPUs; - "VGPU_types", gc_VGPU_types; - "Host patches", gc_Host_patches; - "Host CPUs", gc_host_cpus; - "Host metrics", gc_host_metrics; - "Tasks", timeout_tasks; - "Sessions", timeout_sessions; - "Messages", gc_messages; - "Consoles", gc_consoles; - "PVS proxies", gc_PVS_proxies; - "PVS servers", gc_PVS_servers; - "PVS cache storage", gc_PVS_cache_storage; - (* timeout_alerts; *) - (* CA-29253: wake up all blocked clients *) - "Heartbeat", Xapi_event.heartbeat; - ] diff --git a/ocaml/xapi/extauth_plugin_ADpbis.ml b/ocaml/xapi/extauth_plugin_ADpbis.ml index 25f825c7175..7f02408b1f8 100644 --- a/ocaml/xapi/extauth_plugin_ADpbis.ml +++ b/ocaml/xapi/extauth_plugin_ADpbis.ml @@ -18,47 +18,6 @@ module D = Debug.Make(struct let name="extauth_plugin_ADpbis" end) open D -open Stdext.Xstringext - -let match_error_tag (lines:string list) = - let err_catch_list = - [ "DNS_ERROR_BAD_PACKET", 
Auth_signature.E_LOOKUP; - "LW_ERROR_PASSWORD_MISMATCH", Auth_signature.E_CREDENTIALS; - "LW_ERROR_INVALID_ACCOUNT", Auth_signature.E_INVALID_ACCOUNT; - "LW_ERROR_ACCESS_DENIED", Auth_signature.E_DENIED; - "LW_ERROR_DOMAIN_IS_OFFLINE", Auth_signature.E_UNAVAILABLE; - "LW_ERROR_INVALID_OU", Auth_signature.E_INVALID_OU; - (* More errors to be caught here *) - ] - in - let split_to_words = fun str -> - let seps = ['('; ')'; ' '; '\t'; '.'] in - String.split_f (fun s -> List.exists (fun sep -> sep = s) seps) str - in - let rec has_err lines err_pattern = - match lines with - | [] -> false - | line :: rest -> - try - ignore(List.find (fun w -> w = err_pattern) (split_to_words line)); - true - with Not_found -> has_err rest err_pattern - in - try - let (_, errtag) = List.find (fun (err_pattern, _) -> has_err lines err_pattern) err_catch_list in - errtag - with Not_found -> Auth_signature.E_GENERIC - -let extract_sid_from_group_list = fun group_list -> - List.map (fun (n,v)-> - let v = String.replace ")" "" v in - let v = String.replace "sid =" "|" v in - let vs = String.split_f (fun c -> c = '|') v in - let sid = String.trim (List.nth vs 1) in - debug "extract_sid_from_group_list get sid=[%s]" sid; - sid - ) (List.filter (fun (n,v)->n="") group_list) - module AuthADlw : Auth_signature.AUTH_MODULE = struct @@ -75,6 +34,17 @@ struct let splitlines s = String.split_f (fun c -> c = '\n') (String.replace "#012" "\n" s) + let rec string_trim s = + let l = String.length s in + if l = 0 then + s + else if s.[0] = ' ' || s.[0] = '\t' || s.[0] = '\n' || s.[0] = '\r' then + string_trim (String.sub s 1 (l-1)) + else if s.[l-1] = ' ' || s.[l-1] = '\t' || s.[l-1] = '\n' || s.[l-1] = '\r' then + string_trim (String.sub s 0 (l-1)) + else + s + let pbis_common_with_password (password:string) (pbis_cmd:string) (pbis_args:string list) = let debug_cmd = pbis_cmd ^ " " ^ (List.fold_left (fun p pp -> p^" "^pp) " " pbis_args) in try @@ -87,9 +57,9 @@ struct error "execute %s exited with code 
%d [stdout = '%s'; stderr = '%s']" debug_cmd n stdout stderr; let lines = List.filter (fun l-> String.length l > 0) (splitlines (stdout ^ stderr)) in let errmsg = List.hd (List.rev lines) in - let errtag = match_error_tag lines in - raise (Auth_signature.Auth_service_error (errtag, errmsg)) - | e -> error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); + raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, errmsg)) + | e -> + error "execute %s exited: %s" debug_cmd (ExnHelper.string_of_exn e); raise (Auth_signature.Auth_service_error (Auth_signature.E_GENERIC, user_friendly_error_msg)) let pbis_config (name:string) (value:string) = @@ -241,8 +211,8 @@ struct debug "parse %s: currkey=[%s] line=[%s]" debug_cmd currkey line; if List.length slices > 1 then begin - let key = String.trim (List.hd slices) in - let value = String.trim (List.nth slices 1) in + let key = string_trim (List.hd slices) in + let value = string_trim (List.nth slices 1) in debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; if String.length value > 0 then (acc @ [(key, value)], "") @@ -251,7 +221,7 @@ struct end else let key = currkey in - let value = String.trim line in + let value = string_trim line in debug "parse %s: key=[%s] value=[%s] currkey=[%s]" debug_cmd key value currkey; (acc @ [(key, value)], currkey) ) in @@ -353,7 +323,14 @@ struct And pbis_common will return subject_attrs as [("Number of groups found for user 'test@testdomain'", "2"), ("", line1), ("", line2) ... 
("", lineN)] *) - extract_sid_from_group_list subject_attrs + List.map (fun (n,v)-> + let v = String.replace ")" "|" v in + let v = String.replace "sid =" "|" v in + let vs = String.split_f (fun c -> c = '|') v in + let sid = string_trim (List.nth vs 1) in + debug "pbis_get_group_sids_byname %s get sid=[%s]" _subject_name sid; + sid + ) (List.filter (fun (n,v)->n="") subject_attrs) let pbis_get_sid_bygid gid = diff --git a/ocaml/xapi/helpers.ml b/ocaml/xapi/helpers.ml index 348adf086a3..cd3cd2a9681 100644 --- a/ocaml/xapi/helpers.ml +++ b/ocaml/xapi/helpers.ml @@ -185,15 +185,6 @@ let update_pif_address ~__context ~self = with _ -> debug "Bridge %s is not up; not updating IP" bridge -let update_getty () = - (* Running update-issue service on best effort basis *) - try - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.update_issue_script []); - ignore (Forkhelpers.execute_command_get_output !Xapi_globs.kill_process_script ["-q"; "-HUP"; "mingetty"; "agetty"]) - with e -> - debug "update_getty at %s caught exception: %s" - __LOC__ (Printexc.to_string e) - let set_gateway ~__context ~pif ~bridge = let dbg = Context.string_of_task __context in try diff --git a/ocaml/xapi/import.ml b/ocaml/xapi/import.ml index 65b0deacec9..50cafc5fdc4 100644 --- a/ocaml/xapi/import.ml +++ b/ocaml/xapi/import.ml @@ -1553,7 +1553,7 @@ let metadata_handler (req: Request.t) s _ = Tar_unix.Archive.skip s (Tar_unix.Header.length * 2); let header = header_of_xmlrpc metadata in - assert_compatible ~__context header.version; + assert_compatable ~__context header.version; if full_restore then assert_can_restore_backup ~__context rpc session_id header; with_error_handling (fun () -> @@ -1597,7 +1597,7 @@ let stream_import __context rpc session_id s content_length refresh_session conf else begin debug "importing new style VM"; let header = header_of_xmlrpc metadata in - assert_compatible ~__context header.version; + assert_compatable ~__context header.version; if config.full_restore then 
assert_can_restore_backup ~__context rpc session_id header; (* objects created here: *) diff --git a/ocaml/xapi/importexport.ml b/ocaml/xapi/importexport.ml index fcdecac4449..6d1076329f0 100644 --- a/ocaml/xapi/importexport.ml +++ b/ocaml/xapi/importexport.ml @@ -79,7 +79,7 @@ let this_version __context = (** Raises an exception if a prospective import cannot be handled by this code. This will get complicated over time... *) -let assert_compatible ~__context other_version = +let assert_compatable ~__context other_version = let this_version = this_version __context in let error() = error "Import version is incompatible"; @@ -257,31 +257,27 @@ let remote_metadata_export_import ~__context ~rpc ~session_id ~remote_address ~r ] ~keep_alive:false Http.Put remote_import_request in debug "Piping HTTP %s to %s" (Http.Request.to_string get) (Http.Request.to_string put); - begin try - with_transport (Unix Xapi_globs.unix_domain_socket) - (with_http get - (fun (r, ifd) -> - debug "Content-length: %s" (Stdext.Opt.default "None" (Stdext.Opt.map Int64.to_string r.Http.Response.content_length)); - let put = { put with Http.Request.content_length = r.Http.Response.content_length } in - debug "Connecting to %s:%d" remote_address !Xapi_globs.https_port; - (* Spawn a cached stunnel instance. Otherwise, once metadata tranmission completes, the connection - between local xapi and stunnel will be closed immediately, and the new spawned stunnel instance - will be revoked, this might cause the remote stunnel gets partial metadata xml file, and the - ripple effect is that remote xapi fails to parse metadata xml file. Using a cached stunnel can - not always avoid the problem since any cached stunnel entry might be evicted. 
However, it is - unlikely to happen in practice because the cache size is large enough.*) - with_transport (SSL (SSL.make ~use_stunnel_cache:true (), remote_address, !Xapi_globs.https_port)) - (with_http put - (fun (_, ofd) -> + with_transport (Unix Xapi_globs.unix_domain_socket) + (with_http get + (fun (r, ifd) -> + debug "Content-length: %s" (Stdext.Opt.default "None" (Stdext.Opt.map Int64.to_string r.Http.Response.content_length)); + let put = { put with Http.Request.content_length = r.Http.Response.content_length } in + debug "Connecting to %s:%d" remote_address !Xapi_globs.https_port; + (* Spawn a cached stunnel instance. Otherwise, once metadata tranmission completes, the connection + between local xapi and stunnel will be closed immediately, and the new spawned stunnel instance + will be revoked, this might cause the remote stunnel gets partial metadata xml file, and the + ripple effect is that remote xapi fails to parse metadata xml file. Using a cached stunnel can + not always avoid the problem since any cached stunnel entry might be evicted. 
However, it is + unlikely to happen in practice because the cache size is large enough.*) + with_transport (SSL (SSL.make ~use_stunnel_cache:true (), remote_address, !Xapi_globs.https_port)) + (with_http put + (fun (_, ofd) -> let (n: int64) = Stdext.Unixext.copy_file ?limit:r.Http.Response.content_length ifd ofd in debug "Written %Ld bytes" n - ) - ) - ) - ) - with Xmlrpc_client.Stunnel_connection_failed -> - raise (Api_errors.Server_error(Api_errors.tls_connection_failed, [remote_address; (string_of_int !Xapi_globs.https_port)])) - end; + ) + ) + ) + ); (* Wait for remote task to succeed or fail *) Cli_util.wait_for_task_completion rpc session_id remote_task; match Client.Task.get_status rpc session_id remote_task with diff --git a/ocaml/xapi/nm.ml b/ocaml/xapi/nm.ml index 9d95e946781..d02c4373ed1 100644 --- a/ocaml/xapi/nm.ml +++ b/ocaml/xapi/nm.ml @@ -462,17 +462,18 @@ let bring_pif_up ~__context ?(management_interface=false) (pif: API.ref_PIF) = let master = Db.Bond.get_master ~__context ~self:bond in Db.PIF.set_currently_attached ~__context ~self:master ~value:false end; - Xapi_mgmt_iface.on_dom0_networking_change ~__context - end; - (* sync MTU *) - try - let mtu = Int64.of_int (Net.Interface.get_mtu dbg ~name:bridge) in - if mtu <> rc.API.pIF_MTU then - Db.PIF.set_MTU ~__context ~self:pif ~value:mtu - with _ -> - warn "could not update MTU field on PIF %s" rc.API.pIF_uuid - ) + (* sync MTU *) + (try + let mtu = Int64.of_int (Net.Interface.get_mtu dbg ~name:bridge) in + Db.PIF.set_MTU ~__context ~self:pif ~value:mtu + with _ -> + debug "could not update MTU field on PIF %s" rc.API.pIF_uuid + ); + + Xapi_mgmt_iface.on_dom0_networking_change ~__context + end + ) let bring_pif_down ~__context ?(force=false) (pif: API.ref_PIF) = with_local_lock (fun () -> diff --git a/ocaml/xapi/record_util.ml b/ocaml/xapi/record_util.ml index d5db5868341..6c70ac0c180 100644 --- a/ocaml/xapi/record_util.ml +++ b/ocaml/xapi/record_util.ml @@ -375,6 +375,19 @@ let power_to_string 
h = | `ShuttingDown -> "shutting down" | `Migrating -> "migrating" +let string_to_vdi_type x = match (String.lowercase x) with + | "system" -> Some `system + | "user" -> Some `user + | "ephemeral" -> Some `ephemeral + | "suspend" -> Some `suspend + | "crashdump" -> Some `crashdump + | "ha statefile" -> Some `ha_statefile + | "metadata" -> Some `metadata + | "redo log" -> Some `redo_log + | "rrd" -> Some `rrd + | "pvs_cache" -> Some `pvs_cache + | _ -> None + let vdi_type_to_string t = match t with | `system -> "System" diff --git a/ocaml/xapi/sm-back-file b/ocaml/xapi/sm-back-file new file mode 100755 index 00000000000..3dc87f304c8 --- /dev/null +++ b/ocaml/xapi/sm-back-file @@ -0,0 +1,137 @@ +#!/bin/sh +# Copyright (c) 2006 XenSource Inc. +# Author: Vincent Hanquez +# +# storage manager backend: qcow operations +# + +check_arg_ge() { + if [ "$1" -lt "$2" ]; then exit 3; fi; +} + +check_arg_eq() { + if [ "$1" -ne "$2" ]; then exit 3; fi; +} + +sr_create() { + sruuid=$1 + shift +} + +sr_delete() { + sruuid=$1 + exit 2 +} + +sr_attach() { + sruuid=$1 + mkdir -p "/SR-${sruuid}" + mkdir -p "/SR-${sruuid}/images" +} + +sr_detach() { + sruuid=$1 + rm -rf "/SR-${sruuid}" +} + +vdi_create() { + sruuid=$1 + vdiuuid=$2 + size="$3k" + srname="/var/xensource/SR-${sruuid}" + vdiname="vdi-${vdiuuid}" + + # FIXME: count is wrong here + dd if=/dev/zero of=${srname}/${vdiname} count=${size} +} + +vdi_delete() { + sruuid=$1 + vdiuuid=$2 + srname="/var/xensource/SR-${sruuid}" + vdiname="vdi-${vdiuuid}" + rm ${srname}/${vdiname} +} + +vdi_attach() { + sruuid=$1 + vdiuuid=$2 + srname="/var/xensource/SR-${sruuid}" + vdiname="vdi-${vdiuuid}" + + ln -f -s "${srname}/${vdiname}" "/SR-${sruuid}/images/${vdiuuid}" +} + +vdi_detach() { + sruuid=$1 + vdiuuid=$2 + + rm -f "/SR-${sruuid}/images/${vdiuuid}" +} + +vdi_clone() { + sruuid=$1 + vdiuuid=$2 + dvdiuuid=$3 + srname="/var/xensource/SR-${sruuid}" + vdiname="vdi-${vdiuuid}" + dvdiname="vdi-${dvdiuuid}" + + cp "${srname}/${vdiname}" 
"${srname}/${dvdiname}" +} + +vdi_resize() { + sruuid=$1 + vdiuuid=$2 + newsize=$3 + + exit 0 +} + +cmd=$1 +shift +case "$cmd" in +sr_create) + check_arg_ge $# 2 + sr_create $* + ;; +sr_delete) + check_arg_eq $# 1 + sr_delete $* + ;; +sr_attach) + check_arg_eq $# 1 + sr_attach $* + ;; +sr_detach) + check_arg_eq $# 1 + sr_detach $* + ;; +vdi_create) + check_arg_eq $# 3 + vdi_create $* + ;; +vdi_delete) + check_arg_eq $# 2 + vdi_delete $* + ;; +vdi_attach) + check_arg_eq $# 2 + vdi_attach $* + ;; +vdi_detach) + check_arg_eq $# 2 + vdi_detach $* + ;; +vdi_clone) + check_arg_eq $# 3 + vdi_clone $* + ;; +vdi_resize) + check_arg_eq $# 3 + vdi_resize $* + ;; +*) + exit 1 +esac +exit $? diff --git a/ocaml/xapi/sm-back-lvm b/ocaml/xapi/sm-back-lvm new file mode 100644 index 00000000000..f99ab42e7be --- /dev/null +++ b/ocaml/xapi/sm-back-lvm @@ -0,0 +1,144 @@ +#!/bin/sh +# Copyright (c) 2006 XenSource Inc. +# Author: Vincent Hanquez +# +# storage manager example backend: lvm operations +# + +check_arg_ge() { + if [ "$1" -lt "$2" ]; then exit 3; fi; +} + +check_arg_eq() { + if [ "$1" -ne "$2" ]; then exit 3; fi; +} + +sr_create() { + sruuid=$1 + vgname="VG_XenStorage-${sruuid}" + shift + + vgcreate ${vgname} $* + vgs --separator : --noheadings --units k ${vgname} | cut -f 5,6 -d: | \ + sed -e 's/:/ /' +} + +sr_delete() { + sruuid=$1 + exit 2 +} + +sr_attach() { + sruuid=$1 + mkdir -p "/SR-${sruuid}" + mkdir -p "/SR-${sruuid}/images" +} + +sr_detach() { + sruuid=$1 + rm -rf "/SR-${sruuid}" +} + +vdi_create() { + sruuid=$1 + vdiuuid=$2 + size="$3k" + vgname="VG_XenStorage-${sruuid}" + vdiname="LV-${vdiuuid}" + lvcreate -L${size} -n"${vdiname}" ${vgname} +} + +vdi_delete() { + sruuid=$1 + vdiuuid=$2 + vgname="VG_XenStorage-${sruuid}" + vdiname="LV-${vdiuuid}" + lvremove -f "/dev/${vgname}/${vdiname}" +} + +vdi_attach() { + sruuid=$1 + vdiuuid=$2 + + ln -f -s "/dev/VG_XenStorage-${sruuid}/LV-${vdiuuid}" \ + "/SR-${sruuid}/images/${vdiuuid}" +} + +vdi_detach() { + sruuid=$1 + 
vdiuuid=$2 + + rm -f "/SR-${sruuid}/images/${vdiuuid}" +} + +vdi_clone() { + sruuid=$1 + vdiuuid=$2 + dvdiuuid=$3 + vgname="VG_XenStorage-${sruuid}" + + size=$(lvs --separator : --noheadings --units k "${vgname}/LV-${vdiuuid}" \ + | cut -d: -f 3) + lvcreate -L${size} -n"LV-${dvdiuuid}" ${vgname} + if [ $? -ne 0 ]; then exit $?; fi + + dd if="/dev/${vgname}/LV-${vdiuuid}" of="/dev/${vgname}/LV-${dvdiuuid}" + if [ $? -ne 0 ]; then exit $?; fi +} + +vdi_resize() { + sruuid=$1 + vdiuuid=$2 + newsize=$3 + vgname="VG_XenStorage-${sruuid}" + + lvresize -L${newsize} "${vgname}/LV-${vdiuuid}" +} + +cmd=$1 +shift +case "$cmd" in +sr_create) + check_arg_ge $# 2 + sr_create $* + ;; +sr_delete) + check_arg_eq $# 1 + sr_delete $* + ;; +sr_attach) + check_arg_eq $# 1 + sr_attach $* + ;; +sr_detach) + check_arg_eq $# 1 + sr_detach $* + ;; +vdi_create) + check_arg_eq $# 3 + vdi_create $* + ;; +vdi_delete) + check_arg_eq $# 2 + vdi_delete $* + ;; +vdi_attach) + check_arg_eq $# 2 + vdi_attach $* + ;; +vdi_detach) + check_arg_eq $# 2 + vdi_detach $* + ;; +vdi_clone) + check_arg_eq $# 3 + vdi_clone $* + ;; +vdi_resize) + check_arg_eq $# 3 + vdi_resize $* + ;; +*) + exit 1 +esac +exit $? 
diff --git a/ocaml/xapi/smint.ml b/ocaml/xapi/smint.ml index 86d09e839fa..97e94f205db 100644 --- a/ocaml/xapi/smint.ml +++ b/ocaml/xapi/smint.ml @@ -45,6 +45,18 @@ type capability = type feature = capability * int64 +let all_capabilites = + [ Sr_create; Sr_delete; Sr_attach; Sr_detach; Sr_scan; Sr_probe; Sr_update; + Sr_supports_local_caching; + Sr_metadata; + Sr_trim; + Sr_stats; + Vdi_create; Vdi_delete; Vdi_attach; Vdi_detach; + Vdi_clone; Vdi_resize; Vdi_activate; Vdi_deactivate; + Vdi_update; Vdi_introduce; + Vdi_resize_online + ] + let string_to_capability_table = [ "SR_PROBE", Sr_probe; "SR_UPDATE", Sr_update; diff --git a/ocaml/xapi/storage_access.ml b/ocaml/xapi/storage_access.ml index 6492f177191..13ee8fe11e9 100644 --- a/ocaml/xapi/storage_access.ml +++ b/ocaml/xapi/storage_access.ml @@ -179,7 +179,7 @@ module SMAPIv1 = struct String.sub queue (i + 1) (String.length queue -i - 1) with Not_found -> queue in - Server_helpers.exec_with_new_task "SR.probe" ~subtask_of:(Ref.of_string dbg) + Server_helpers.exec_with_new_task "SR.create" ~subtask_of:(Ref.of_string dbg) (fun __context -> let task = Context.get_task_id __context in Storage_interface.Raw (Sm.sr_probe (Some task,(Sm.sm_master true :: device_config)) _type sm_config) @@ -779,7 +779,7 @@ module SMAPIv1 = struct (fun __context -> (* This call 'operates' on vdi2 *) let vdi1 = find_vdi ~__context sr vdi1 |> fst in - for_vdi ~dbg ~sr ~vdi:vdi2 "VDI.compose" + for_vdi ~dbg ~sr ~vdi:vdi2 "VDI.activate" (fun device_config _type sr self -> Sm.vdi_compose device_config _type sr vdi1 self ) @@ -811,7 +811,7 @@ module SMAPIv1 = struct info "VDI.get_url dbg:%s sr:%s vdi:%s" dbg sr vdi; (* XXX: PR-1255: tapdisk shouldn't hardcode xapi urls *) (* peer_ip/session_ref/vdi_ref *) - Server_helpers.exec_with_new_task "VDI.get_url" ~subtask_of:(Ref.of_string dbg) + Server_helpers.exec_with_new_task "VDI.compose" ~subtask_of:(Ref.of_string dbg) (fun __context -> let ip = Helpers.get_management_ip_addr ~__context |> 
Opt.unbox in let rpc = Helpers.make_rpc ~__context in diff --git a/ocaml/xapi/storage_proxy.ml b/ocaml/xapi/storage_proxy.ml new file mode 100644 index 00000000000..6121f83f7e3 --- /dev/null +++ b/ocaml/xapi/storage_proxy.ml @@ -0,0 +1,112 @@ +(* + * Copyright (C) 2011 Citrix Systems Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; version 2.1 only. with the special + * exception on linking described in file LICENSE. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + *) + +(* This file should be auto-generated from storage_interface. + Corrollary: don't add anything which can't be auto-generated from storage_interface! *) + +open Storage_interface + +module type RPC = sig + val rpc : Rpc.call -> Rpc.response +end + +module Proxy = functor(RPC: RPC) -> struct + type context = Smint.request + + module Client = Client(RPC) + + module Query = struct + let query _ = Client.Query.query + let diagnostics _ = Client.Query.diagnostics + end + module DP = struct + let create _ = Client.DP.create + let destroy _ = Client.DP.destroy + let diagnostics _ = Client.DP.diagnostics + let attach_info _ = Client.DP.attach_info + let stat_vdi _ = Client.DP.stat_vdi + end + module SR = struct + let probe _ = Client.SR.probe + let create _ = Client.SR.create + let set_name_label _ = Client.SR.set_name_label + let set_name_description _ = Client.SR.set_name_description + let attach _ = Client.SR.attach + let detach _ = Client.SR.detach + let reset _ = Client.SR.reset + let destroy _ = Client.SR.destroy + let scan _ = Client.SR.scan + let stat _ = Client.SR.stat + let list _ = Client.SR.list + let update_snapshot_info_src _ = 
Client.SR.update_snapshot_info_src + let update_snapshot_info_dest _ = Client.SR.update_snapshot_info_dest + end + module VDI = struct + let epoch_begin _ = Client.VDI.epoch_begin + let attach _ = Client.VDI.attach + let activate _ = Client.VDI.activate + let deactivate _ = Client.VDI.deactivate + let detach _ = Client.VDI.detach + let epoch_end _ = Client.VDI.epoch_end + + let create _ = Client.VDI.create + let set_name_label _ = Client.VDI.set_name_label + let set_name_description _ = Client.VDI.set_name_description + let snapshot _ = Client.VDI.snapshot + let clone _ = Client.VDI.clone + let destroy _ = Client.VDI.destroy + let resize _ = Client.VDI.resize + let stat _ = Client.VDI.stat + let introduce _ = Client.VDI.introduce + let set_persistent _ = Client.VDI.set_persistent + let get_by_name _ = Client.VDI.get_by_name + let set_content_id _ = Client.VDI.set_content_id + let similar_content _ = Client.VDI.similar_content + let compose _ = Client.VDI.compose + let add_to_sm_config _ = Client.VDI.add_to_sm_config + let remove_from_sm_config _ = Client.VDI.remove_from_sm_config + let get_url _ = Client.VDI.get_url + end + + let get_by_name _ = Client.get_by_name + + module Policy = struct + let get_backend_vm _ = Client.Policy.get_backend_vm + end + + module DATA = struct + let copy_into _ = Client.DATA.copy_into + let copy _ = Client.DATA.copy + module MIRROR = struct + let start _ = Client.DATA.MIRROR.start + let stop _ = Client.DATA.MIRROR.stop + let stat _ = Client.DATA.MIRROR.stat + let receive_start _ = Client.DATA.MIRROR.receive_start + let receive_finalize _ = Client.DATA.MIRROR.receive_finalize + let receive_cancel _ = Client.DATA.MIRROR.receive_cancel + let list _ = Client.DATA.MIRROR.list + end + end + + module TASK = struct + let stat _ = Client.TASK.stat + let cancel _ = Client.TASK.cancel + let destroy _ = Client.TASK.destroy + let list _ = Client.TASK.list + end + + module UPDATES = struct + let get _ = Client.UPDATES.get + end +end diff --git 
a/ocaml/xapi/suite.ml b/ocaml/xapi/suite.ml index aa819a4510d..0bfe8cb2aa8 100644 --- a/ocaml/xapi/suite.ml +++ b/ocaml/xapi/suite.ml @@ -59,8 +59,6 @@ let base_suite = Test_pvs_cache_storage.test; Test_sdn_controller.test; Test_event.test; - Test_extauth_plugin_ADpbis.test; - Test_guest_agent.test; ] let handlers = [ diff --git a/ocaml/xapi/test_db_lowlevel.ml b/ocaml/xapi/test_db_lowlevel.ml index c37bbecc49f..25e7592343e 100644 --- a/ocaml/xapi/test_db_lowlevel.ml +++ b/ocaml/xapi/test_db_lowlevel.ml @@ -41,38 +41,8 @@ let test_db_get_all_records_race () = let tear_down () = Db_cache_impl.fist_delay_read_records_where := false -let test_idempotent_map () = - Db_globs.idempotent_map := false; - let __context = make_test_database () in - let (vm_ref: API.ref_VM) = make_vm ~__context () in - Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value"; - assert_raises (Db_exn.Duplicate_key ("VM","other_config",(Ref.string_of vm_ref),"test")) - (fun () -> Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value"); - assert_raises (Db_exn.Duplicate_key ("VM","other_config",(Ref.string_of vm_ref),"test")) - (fun () -> Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value2"); - - Db_globs.idempotent_map := true; - let __context = make_test_database () in - let (vm_ref: API.ref_VM) = make_vm ~__context () in - Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value"; - assert_equal (Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value") (); - assert_raises (Db_exn.Duplicate_key ("VM","other_config",(Ref.string_of vm_ref),"test")) - (fun () -> Db.VM.add_to_other_config ~__context ~self:vm_ref ~key:"test" ~value:"value2"); - - Db_globs.idempotent_map := false - -let test_slave_uses_nonlegacy_addmap () = - let operation = Db_cache_types.AddMapLegacy in - let operation' = Db_rpc_common_v1.marshall_structured_op operation |> Db_rpc_common_v1.unmarshall_structured_op in - 
assert_equal operation' Db_cache_types.AddMap; - let operationv2 = Db_rpc_common_v2.Request.Process_structured_field (("",""),"","","",Db_cache_types.AddMapLegacy) in - let operationv2' = Db_rpc_common_v2.Request.(operationv2 |> rpc_of_t |> t_of_rpc) in - assert_equal operationv2' (Db_rpc_common_v2.Request.Process_structured_field (("",""),"","","",Db_cache_types.AddMap)) - let test = "test_db_lowlevel" >::: [ "test_db_get_all_records_race" >:: (bracket id test_db_get_all_records_race tear_down); - "test_db_idempotent_map" >:: test_idempotent_map; - "test_slaves_use_nonlegacy_addmap" >:: test_slave_uses_nonlegacy_addmap; ] diff --git a/ocaml/xapi/test_extauth_plugin_ADpbis.ml b/ocaml/xapi/test_extauth_plugin_ADpbis.ml deleted file mode 100644 index 964dd1d1dde..00000000000 --- a/ocaml/xapi/test_extauth_plugin_ADpbis.ml +++ /dev/null @@ -1,99 +0,0 @@ -(* - * Copyright (C) Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. 
- *) - -open OUnit -open Test_highlevel - -module PbisAuthErrorsCatch = Generic.Make(struct - module Io = struct - type input_t = string list - type output_t = Auth_signature.auth_service_error_tag - - let string_of_input_t = Test_printers.(list string) - let string_of_output_t output = - match output with - | Auth_signature.E_GENERIC -> "E_GENERIC" - | Auth_signature.E_LOOKUP -> "E_LOOKUP" - | Auth_signature.E_DENIED -> "E_DENIED" - | Auth_signature.E_CREDENTIALS -> "E_CREDENTIALS" - | Auth_signature.E_UNAVAILABLE -> "E_UNAVAILABLE" - | Auth_signature.E_INVALID_OU -> "E_INVALID_OU" - | Auth_signature.E_INVALID_ACCOUNT -> "E_INVALID_ACCOUNT" - - end - - let transform = Extauth_plugin_ADpbis.match_error_tag - - let tests = [ - [], Auth_signature.E_GENERIC; - [""; ""], Auth_signature.E_GENERIC; - [""; "some words"], Auth_signature.E_GENERIC; - [""; "DNS_ERROR_BAD_PACKET"], Auth_signature.E_LOOKUP; - [""; "LW_ERROR_PASSWORD_MISMATCH"], Auth_signature.E_CREDENTIALS; - [""; "LW_ERROR_INVALID_ACCOUNT"], Auth_signature.E_INVALID_ACCOUNT; - [""; "LW_ERROR_ACCESS_DENIED"], Auth_signature.E_DENIED; - [""; "LW_ERROR_DOMAIN_IS_OFFLINE"], Auth_signature.E_UNAVAILABLE; - [""; "LW_ERROR_INVALID_OU"], Auth_signature.E_INVALID_OU; - - [""; "prefixDNS_ERROR_BAD_PACKETsuffix"], Auth_signature.E_GENERIC; - [""; "prefix_DNS_ERROR_BAD_PACKET_suffix"], Auth_signature.E_GENERIC; - [""; "prefix(DNS_ERROR_BAD_PACKET)suffix"], Auth_signature.E_LOOKUP; - [""; "prefix.DNS_ERROR_BAD_PACKET.suffix"], Auth_signature.E_LOOKUP; - [""; "prefix DNS_ERROR_BAD_PACKET suffix"], Auth_signature.E_LOOKUP; - [""; "prefix\tDNS_ERROR_BAD_PACKET\tsuffix"], Auth_signature.E_LOOKUP; - ] - end) - -module PbisExtractSid = Generic.Make(struct - module Io = struct - type input_t = (string * string) list - type output_t = string list - - let string_of_input_t = Test_printers.(list (pair string string)) - let string_of_output_t = Test_printers.(list string) - end - - let transform = 
Extauth_plugin_ADpbis.extract_sid_from_group_list - - let tests = [ - [(" ", " ")], []; - - [("Exception","Remote connection shutdown!")], []; - - [("Number of groups found for user 'testAD@BLE'", "0"); - ("Error", "No record found")], - []; - - [("Number of groups found for user 'admin@NVC'", "1"); - ("", "Group[1 of 1] name = NVC\\testg(ab) (gid = 564135020, sid = S-1-5-21-1171552557-368733809-2946345504-1132)")], - ["S-1-5-21-1171552557-368733809-2946345504-1132"]; - - [("Number of groups found for user 'cnk3@UN'", "1"); - ("", "Group[1 of 1] name = UN\\KnmOJ (gid = 492513842, sid = S-1-5-31-5921451325-154521381-3135732118-4527)")], - ["S-1-5-31-5921451325-154521381-3135732118-4527"]; - - [("Number of groups found for user 'test@testdomain'", "2"); - ("", "Group[1 of 2] name = testdomain\\dnsadmins (gid = 580912206, sid = S-1-5-21-791009147-1041474540-2433379237-1102)"); - ("", "Group[2 of 2] name = testdomain\\domain+users (gid = 580911617, sid = S-1-5-21-791009147-1041474540-2433379237-513)")], - ["S-1-5-21-791009147-1041474540-2433379237-1102"; "S-1-5-21-791009147-1041474540-2433379237-513"]; - ] - end) - -let test = - "test_extauth_ADpbis" >::: - [ - "test_pbis_auth_errors_catch" >::: PbisAuthErrorsCatch.tests; - "test_pbis_extract_sid" >::: PbisExtractSid.tests; - ] - diff --git a/ocaml/xapi/test_gpu_group.ml b/ocaml/xapi/test_gpu_group.ml index 605ff23abc9..ad47471837a 100644 --- a/ocaml/xapi/test_gpu_group.ml +++ b/ocaml/xapi/test_gpu_group.ml @@ -55,7 +55,7 @@ let test_supported_enabled_types () = (* Invalidate the PGPU's host ref, and run a GC pass; this should destroy the * pgpu, and clear the group's supported and enabled types. 
*) Db.PGPU.set_host ~__context ~self:pgpu ~value:Ref.null; - Db_gc_util.gc_PGPUs ~__context; + Db_gc.gc_PGPUs ~__context; let group_supported_types = Db.GPU_group.get_supported_VGPU_types ~__context ~self:gPU_group in diff --git a/ocaml/xapi/test_guest_agent.ml b/ocaml/xapi/test_guest_agent.ml deleted file mode 100644 index 0d17a6452b8..00000000000 --- a/ocaml/xapi/test_guest_agent.ml +++ /dev/null @@ -1,371 +0,0 @@ -(* - * Copyright (C) Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -open OUnit -open Test_highlevel - -module Networks = Generic.Make (struct - module Io = struct - type input_t = string list - type output_t = (string * string) list - - let string_of_input_t = Test_printers.(list string) - let string_of_output_t = Test_printers.(assoc_list string string) - end - - type 'a tree = T of 'a * 'a tree list - - let rec add_path_to_tree (T(root, children)) = function - | [] -> (T(root, children)) - | node :: rest_of_path -> - try - let T(_, children_of_node) = List.find (fun (T(n, _)) -> n = node) children in - let t = add_path_to_tree (T(node, children_of_node)) rest_of_path in - T(root, t :: (List.filter (fun (T(n, _)) -> n <> node) children)) - with Not_found -> - T(root, (add_path_to_tree (T(node, [])) rest_of_path) :: children) - - let construct_tree tree path = - let open Stdext.Xstringext in - let nodes = String.split_f (fun s -> s = '/') path in - add_path_to_tree tree nodes - - let rec list_helper children = function - | [] -> List.map (fun 
(T(node, _)) -> node) children - | node :: rest_of_path -> - try - let T(_, children_of_node) = List.find (fun (T(n, _)) -> n = node) children in - list_helper children_of_node rest_of_path - with Not_found -> [] - - let list (T(root, children)) path = - let open Stdext.Xstringext in - let nodes = String.split_f (fun s -> s = '/') path in - list_helper children nodes - - - let transform input = - let tree = List.fold_left construct_tree (T("", [])) input in - Xapi_guest_agent.networks "attr" (list tree) - - let tests = [ - (* basic cases *) - [ "attr/vif/0/ipv6/0"; - ], [ "attr/vif/0/ipv6/0", "0/ipv6/0"; - ]; - - [ "attr/vif/0/ipv4/0"; - ], [ "attr/vif/0/ipv4/0", "0/ip"; - "attr/vif/0/ipv4/0", "0/ipv4/0"; - ]; - - [ "attr/eth0/ip"; - ], [ "attr/eth0/ip", "0/ip"; - "attr/eth0/ip", "0/ipv4/0"; - ]; - - [ "attr/eth0/ipv6/0/addr"; - ], [ "attr/eth0/ip", "0/ip"; - "attr/eth0/ip", "0/ipv4/0"; - "attr/eth0/ipv6/0/addr", "0/ipv6/0"; - ]; - - - (* index *) - [ "attr/vif/1/ipv6/2"; - ], [ "attr/vif/1/ipv6/2", "1/ipv6/2"; - ]; - - [ "attr/vif/1/ipv4/2"; - ], [ "attr/vif/1/ipv4/2", "1/ip"; - "attr/vif/1/ipv4/2", "1/ipv4/2"; - ]; - - [ "attr/eth1/ip"; - ], [ "attr/eth1/ip", "1/ip"; - "attr/eth1/ip", "1/ipv4/0"; - ]; - - [ "attr/eth1/ipv6/2/addr"; - ], [ "attr/eth1/ip", "1/ip"; - "attr/eth1/ip", "1/ipv4/0"; - "attr/eth1/ipv6/2/addr", "1/ipv6/2"; - ]; - - (* multiple ip addrs *) - [ "attr/vif/0/ipv6/0"; - "attr/vif/0/ipv6/1"; - ], [ "attr/vif/0/ipv6/1", "0/ipv6/1"; - "attr/vif/0/ipv6/0", "0/ipv6/0"; - ]; - - [ "attr/vif/0/ipv4/0"; - "attr/vif/0/ipv4/1"; - ], [ "attr/vif/0/ipv4/1", "0/ipv4/1"; - "attr/vif/0/ipv4/0", "0/ip"; - "attr/vif/0/ipv4/0", "0/ipv4/0"; - ]; - - [ "attr/eth0/ip"; - "attr/eth0/ipv6/0/addr"; - ], [ "attr/eth0/ip", "0/ip"; - "attr/eth0/ip", "0/ipv4/0"; - "attr/eth0/ipv6/0/addr", "0/ipv6/0"; - ]; - - [ "attr/vif/0/ipv4/0"; - "attr/vif/0/ipv6/0"; - ], [ "attr/vif/0/ipv4/0", "0/ip"; - "attr/vif/0/ipv4/0", "0/ipv4/0"; - "attr/vif/0/ipv6/0", "0/ipv6/0"; - ]; - - [ 
"attr/eth0/ip"; - "attr/vif/0/ipv4/0"; - "attr/eth0/ipv6/0/addr"; - "attr/vif/0/ipv6/0"; - ], [ "attr/vif/0/ipv4/0", "0/ip"; - "attr/vif/0/ipv4/0", "0/ipv4/0"; - "attr/vif/0/ipv6/0", "0/ipv6/0"; - ]; - - (* multiple vifs and multiple ip addrs *) - [ "attr/vif/0/ipv6/0"; - "attr/vif/0/ipv6/1"; - "attr/vif/1/ipv6/0"; - "attr/vif/1/ipv6/1"; - ], [ "attr/vif/0/ipv6/1", "0/ipv6/1"; - "attr/vif/0/ipv6/0", "0/ipv6/0"; - "attr/vif/1/ipv6/1", "1/ipv6/1"; - "attr/vif/1/ipv6/0", "1/ipv6/0"; - ]; - - [ "attr/vif/0/ipv4/0"; - "attr/vif/0/ipv4/1"; - "attr/vif/1/ipv4/0"; - "attr/vif/1/ipv4/1"; - ], [ "attr/vif/0/ipv4/1", "0/ipv4/1"; - "attr/vif/0/ipv4/0", "0/ip"; - "attr/vif/0/ipv4/0", "0/ipv4/0"; - "attr/vif/1/ipv4/1", "1/ipv4/1"; - "attr/vif/1/ipv4/0", "1/ip"; - "attr/vif/1/ipv4/0", "1/ipv4/0"; - ]; - - (* exceptions *) - [ "attr/vif/0/ipv4/a"; - "attr/vif/0/ipv4/1"; - ], []; - ] - end) - -module Initial_guest_metrics = Generic.Make (struct - module Io = struct - type input_t = (string * string) list - type output_t = (string * string) list - - let string_of_input_t = Test_printers.(assoc_list string string) - let string_of_output_t = Test_printers.(assoc_list string string) - end - - type 'a mtree = - | Lf of 'a * 'a - | Mt of 'a * 'a mtree list - - let has_name name = function - | Lf (n, _) -> n = name - | Mt (n, _) -> n = name - - let get_name = function - | Lf (n, _) -> n - | Mt (n, _) -> n - - let rec add_leaf_to_mtree paths leaf_value = function - | Lf _ -> raise (Failure "Can't add a leaf on a leaf") - | Mt (root, children) -> - (match paths with - | [] -> - (match children with - | [] -> Lf(root, leaf_value) - | _ -> raise (Failure "Can't add a leaf on a tree node")) - | node :: rest_paths -> - try - let t = List.find (has_name node) children in - (match t with - | Lf (_, _) -> raise (Failure "Can't overwrite an existing leaf") - | Mt (node, children_of_node) -> - let mt = add_leaf_to_mtree rest_paths leaf_value (Mt(node, children_of_node)) in - Mt(root, mt :: 
(List.filter (fun n -> not (has_name node n)) children))) - with Not_found -> - Mt(root, (add_leaf_to_mtree rest_paths leaf_value (Mt(node, []))) :: children)) - - let construct_mtree mtree (path, leaf_value) = - let open Stdext.Xstringext in - let nodes = String.split_f (fun s -> s = '/') path in - add_leaf_to_mtree nodes leaf_value mtree - - let rec list_helper children = function - | [] -> List.map get_name children - | node :: rest_paths -> - try - match List.find (has_name node) children with - | Lf (_, _) -> [] - | Mt (_, children_of_node) -> list_helper children_of_node rest_paths - with Not_found -> [] - - let list mtree path = - match mtree with - | Lf (_, _) -> [] - | Mt (_, children) -> - let open Stdext.Xstringext in - let paths = String.split_f (fun s -> s = '/') path in - list_helper children paths - - let rec lookup_helper mtree = function - | [] -> - (match mtree with - | Lf (_, v) -> Some v - | Mt (_, _) -> None) - | node :: rest_paths -> - (match mtree with - | Lf (l, v) -> lookup_helper (Lf(l, v)) rest_paths - | Mt (_, children) -> - try - lookup_helper (List.find (has_name node) children) rest_paths - with Not_found -> None) - - let lookup mtree path = - let open Stdext.Xstringext in - let paths = String.split_f (fun s -> s = '/') path in - lookup_helper mtree paths - - - let transform input = - let tree = List.fold_left construct_mtree (Mt("", [])) input in - let guest_metrics = Xapi_guest_agent.get_initial_guest_metrics (lookup tree) (list tree) in - guest_metrics.Xapi_guest_agent.networks - - - let tests = [ - (* basic cases *) - [ "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - [ "attr/vif/0/ipv4/0", "192.168.0.1"; - ], [ "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - ]; - - [ "attr/eth0/ip", "192.168.0.1"; - ], [ "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - ]; - - [ "attr/eth0/ipv6/0/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ 
"0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - (* index *) - [ "attr/vif/1/ipv6/2", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ "1/ipv6/2", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - [ "attr/vif/1/ipv4/2", "192.168.0.1"; - ], [ "1/ip", "192.168.0.1"; - "1/ipv4/2", "192.168.0.1"; - ]; - - [ "attr/eth1/ip", "192.168.0.1"; - ], [ "1/ip", "192.168.0.1"; - "1/ipv4/0", "192.168.0.1"; - ]; - - [ "attr/eth1/ipv6/2/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ "1/ipv6/2", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - (* multiple ip addrs *) - [ "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - "attr/vif/0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; - ], [ "0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; - "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - [ "attr/vif/0/ipv4/0", "192.168.0.1"; - "attr/vif/0/ipv4/1", "192.168.1.1"; - ], [ "0/ipv4/1", "192.168.1.1"; - "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - ]; - - [ "attr/eth0/ip", "192.168.0.1"; - "attr/eth0/ipv6/0/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - [ "attr/vif/0/ipv4/0", "192.168.0.1"; - "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - [ "attr/eth0/ip", "192.168.0.1"; - "attr/vif/0/ipv4/0", "192.168.0.1"; - "attr/eth0/ipv6/0/addr", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ], [ "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - ]; - - (* multiple vifs and multiple ip addrs *) - [ "attr/vif/0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - "attr/vif/0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; - 
"attr/vif/1/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd16"; - "attr/vif/1/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd17"; - ], [ "0/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd07"; - "0/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd06"; - "1/ipv6/1", "fe80:0000:0000:0000:7870:94ff:fe52:dd17"; - "1/ipv6/0", "fe80:0000:0000:0000:7870:94ff:fe52:dd16"; - ]; - - [ "attr/vif/0/ipv4/0", "192.168.0.1"; - "attr/vif/0/ipv4/1", "192.168.0.2"; - "attr/vif/1/ipv4/0", "192.168.1.1"; - "attr/vif/1/ipv4/1", "192.168.1.2"; - ], [ "0/ipv4/1", "192.168.0.2"; - "0/ip", "192.168.0.1"; - "0/ipv4/0", "192.168.0.1"; - "1/ipv4/1", "192.168.1.2"; - "1/ip", "192.168.1.1"; - "1/ipv4/0", "192.168.1.1"; - ]; - - (* exceptions *) - [ "attr/vif/0/ipv4/a", "192.168.0.1"; - "attr/vif/0/ipv4/1", "192.168.0.1"; - ], []; - ] - end) - -let test = - "test_guest_agent" >::: - [ - "test_networks" >::: Networks.tests; - "test_get_initial_guest_metrics" >::: Initial_guest_metrics.tests; - ] diff --git a/ocaml/xapi/test_pvs_proxy.ml b/ocaml/xapi/test_pvs_proxy.ml index 0c10158d0df..0040337753b 100644 --- a/ocaml/xapi/test_pvs_proxy.ml +++ b/ocaml/xapi/test_pvs_proxy.ml @@ -72,18 +72,18 @@ let test_gc_proxy () = let site = make_pvs_site ~__context () in let vIF = make_vif ~__context ~device:"0" () in let proxy = Xapi_pvs_proxy.create ~__context ~site ~vIF in - ( Db_gc_util.gc_PVS_proxies ~__context + ( Db_gc.gc_PVS_proxies ~__context ; assert_equal (Db.PVS_proxy.get_site ~__context ~self:proxy) site ; assert_equal (Db.PVS_proxy.get_VIF ~__context ~self:proxy) vIF ; Db.PVS_proxy.set_site ~__context ~self:proxy ~value:Ref.null - ; Db_gc_util.gc_PVS_proxies ~__context (* should collect the proxy *) + ; Db_gc.gc_PVS_proxies ~__context (* should collect the proxy *) ; assert_equal false (Db.is_valid_ref __context proxy)); let proxy = Xapi_pvs_proxy.create ~__context ~site ~vIF in - ( Db_gc_util.gc_PVS_proxies ~__context + ( Db_gc.gc_PVS_proxies ~__context ; assert_equal (Db.PVS_proxy.get_site 
~__context ~self:proxy) site ; assert_equal (Db.PVS_proxy.get_VIF ~__context ~self:proxy) vIF ; Db.PVS_proxy.set_VIF ~__context ~self:proxy ~value:Ref.null - ; Db_gc_util.gc_PVS_proxies ~__context (* should collect the proxy *) + ; Db_gc.gc_PVS_proxies ~__context (* should collect the proxy *) ; assert_equal false (Db.is_valid_ref __context proxy)) let test = diff --git a/ocaml/xapi/test_pvs_server.ml b/ocaml/xapi/test_pvs_server.ml index 4b927a5fa20..11e967fb217 100644 --- a/ocaml/xapi/test_pvs_server.ml +++ b/ocaml/xapi/test_pvs_server.ml @@ -150,10 +150,10 @@ let test_gc () = let server = Xapi_pvs_server.introduce ~__context ~addresses ~first_port ~last_port ~site in - ( Db_gc_util.gc_PVS_servers ~__context + ( Db_gc.gc_PVS_servers ~__context ; assert_equal (Db.PVS_server.get_site ~__context ~self:server) site ; Db.PVS_server.set_site ~__context ~self:server ~value:Ref.null - ; Db_gc_util.gc_PVS_servers ~__context (* should collect the server *) + ; Db_gc.gc_PVS_servers ~__context (* should collect the server *) ; assert_equal false (Db.is_valid_ref __context server) ) diff --git a/ocaml/xapi/wlb_reports.ml b/ocaml/xapi/wlb_reports.ml index 719ca2d5d62..b97331c1008 100644 --- a/ocaml/xapi/wlb_reports.ml +++ b/ocaml/xapi/wlb_reports.ml @@ -98,8 +98,8 @@ open Stdext.Xstringext module D = Debug.Make(struct let name="wlb_reports" end) open D -let report_tag = "XmlDataSet" -let diagnostics_tag = "DiagnosticData" +let report_tokens = ("<XmlDataSet>", "</XmlDataSet>") +let diagnostics_tokens = ("<DiagnosticData>", "</DiagnosticData>") let bufsize = 16384 @@ -107,8 +107,13 @@ let hex_entity s = (*debug "hex_entity %s" s; *) char_of_int (int_of_string ("0" ^ (String.sub s 1 (String.length s - 1)))) -let trim_and_send method_name tag recv_sock send_sock = +let trim_and_send method_name (start_str, end_str) recv_sock send_sock = let recv_buf = Buffer.create bufsize in + let send_buf = Buffer.create bufsize in + let recv_state = ref 1 in + let send_state = ref 1 in + let entity = ref "" in + let fill () = let s = String.create
bufsize in let n = Unix.read recv_sock s 0 bufsize in @@ -116,41 +121,106 @@ let trim_and_send method_name tag recv_sock send_sock = Buffer.add_string recv_buf (String.sub s 0 n); n in - (* Since we use xml parser to parse the reponse message, we don't need to escape the xml content in `send` *) + let send s = - ignore (Unix.write send_sock s 0 (String.length s)) + let s_len = String.length s in + let rec send' i = + let c = s.[i] in + (* debug "%c" c; *) + if !send_state = 1 then + begin + if c = '&' then + send_state := 2 + else + Buffer.add_char send_buf c + end + else + begin + if c = ';' then + let e = !entity in + Buffer.add_char send_buf + (if e = "lt" then + '<' + else if e = "gt" then + '>' + else if e = "amp" then + '&' + else if e = "apos" then + '\'' + else if e = "quot" then + '"' + else + hex_entity e); + send_state := 1; + entity := "" + else + entity := !entity ^ (String.of_char c) + end; + if i < s_len - 1 then + send' (i + 1) + else + () + in + send' 0; + ignore (Unix.write send_sock (Buffer.contents send_buf) 0 + (Buffer.length send_buf)); + Buffer.clear send_buf in - let rec recv_all ()= + + let rec pump () = let n = fill() in - if n > 0 then - recv_all() - else - () + if Buffer.length recv_buf > 0 then + begin + let s = Buffer.contents recv_buf in + (* debug "%s %d" s !recv_state; *) + if !recv_state = 1 then + match String.find_all start_str s with + | n :: _ -> + Buffer.clear recv_buf; + let i = n + String.length start_str in + Buffer.add_substring recv_buf s i (String.length s - i); + recv_state := 2 + | [] -> + () + else if !recv_state = 2 then + match String.find_all end_str s with + | n :: _ -> + send (String.sub s 0 n); + Buffer.clear recv_buf; + recv_state := 3 + | [] -> + send s; + Buffer.clear recv_buf + else + Buffer.clear recv_buf; + if n > 0 then + pump() + else if !recv_state != 3 then + (* if in state 1 we are still looking for the opening tag of the data set, expect xml to be valid + if in state 2 we are still looking for the 
closing tag of the data set, expect xml to be truncated *) + let rec_data = (Buffer.contents recv_buf) in + if !recv_state = 1 then + begin + try + let xml_data = Xml.parse_string rec_data in + Workload_balancing.parse_result_code + method_name + (Workload_balancing.retrieve_inner_xml method_name xml_data true) + "Failed to detect end of XML, data could be truncated" + rec_data + true + with + | Xml.Error err -> + Workload_balancing.raise_malformed_response' method_name (Xml.error err) rec_data + end + else + Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." rec_data + end in - recv_all(); - let s = Buffer.contents recv_buf in - debug "receive len: %d, content: %s" (String.length s) s; - try - let xml_data = Xml.parse_string s in - let report_result_xml = Workload_balancing.retrieve_inner_xml method_name xml_data true in - try - let xml_data_set_content = Workload_balancing.data_from_leaf (Workload_balancing.descend_and_match [tag] report_result_xml) in - debug "send conent: %s" xml_data_set_content; - send xml_data_set_content - with - | Workload_balancing.Xml_parse_failure error -> - Workload_balancing.parse_result_code - method_name - report_result_xml - "Failed to detect end of XML, data could be truncated" - s - true - with - | Xml.Error err -> - Workload_balancing.raise_malformed_response' method_name "Expected data is truncated." 
s - - -let handle req bio method_name tag (method_name, request_func) = + pump() + + +let handle req bio method_name tokens (method_name, request_func) = let client_sock = Buf_io.fd_of bio in Buf_io.assert_buffer_empty bio; debug "handle: fd = %d" (Stdext.Unixext.int_of_file_descr client_sock); @@ -166,7 +236,7 @@ let handle req bio method_name tag (method_name, request_func) = let parse response wlb_sock = Http_svr.headers client_sock (Http.http_200_ok ()); - trim_and_send method_name tag wlb_sock client_sock + trim_and_send method_name tokens wlb_sock client_sock in try request_func ~__context ~handler:parse @@ -197,11 +267,11 @@ let report_handler (req: Request.t) (bio: Buf_io.t) _ = not (List.mem k ["session_id"; "task_id"; "report"])) req.Request.query in - handle req bio "ExecuteReport" report_tag + handle req bio "ExecuteReport" report_tokens (Workload_balancing.wlb_report_request report params) (* GET /wlb_diagnostics?session_id=&task_id= *) let diagnostics_handler (req: Request.t) (bio: Buf_io.t) _ = - handle req bio "GetDiagnostics" diagnostics_tag + handle req bio "GetDiagnostics" diagnostics_tokens Workload_balancing.wlb_diagnostics_request diff --git a/ocaml/xapi/xapi.ml b/ocaml/xapi/xapi.ml index b2f529b0062..1784ae06c92 100644 --- a/ocaml/xapi/xapi.ml +++ b/ocaml/xapi/xapi.ml @@ -300,8 +300,7 @@ let bring_up_management_if ~__context () = warn "Failed to acquire a management IP address" end; (* Start the Host Internal Management Network, if needed. *) - Xapi_network.check_himn ~__context; - Helpers.update_getty () + Xapi_network.check_himn ~__context with e -> debug "Caught exception bringing up management interface: %s" (ExnHelper.string_of_exn e) diff --git a/ocaml/xapi/xapi_crashdump.ml b/ocaml/xapi/xapi_crashdump.ml index 0325e04510a..d8183877109 100644 --- a/ocaml/xapi/xapi_crashdump.ml +++ b/ocaml/xapi/xapi_crashdump.ml @@ -11,15 +11,22 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU Lesser General Public License for more details. *) +exception Not_implemented + +let nothrow f () = try f() with _ -> () + +let create ~__context ~vM ~vDI = + let cdumpref = Ref.make() in + let uuid = Uuid.to_string (Uuid.make_uuid()) in + Db.Crashdump.create ~__context ~ref:cdumpref ~uuid ~vM ~vDI ~other_config:[]; + cdumpref let destroy ~__context ~self = Stdext.Pervasiveext.finally - (Helpers.log_exn_continue - "destroying crashdump" - (fun ()-> - let vdi = Db.Crashdump.get_VDI ~__context ~self in - Helpers.call_api_functions ~__context - (fun rpc session_id -> - Client.Client.VDI.destroy rpc session_id vdi))) + (nothrow (fun ()-> + let vdi = Db.Crashdump.get_VDI ~__context ~self in + Helpers.call_api_functions ~__context + (fun rpc session_id -> + Client.Client.VDI.destroy rpc session_id vdi))) (fun ()-> Db.Crashdump.destroy ~__context ~self) diff --git a/ocaml/xapi/xapi_globs.ml b/ocaml/xapi/xapi_globs.ml index dd975ba82ee..c3712a4393a 100644 --- a/ocaml/xapi/xapi_globs.ml +++ b/ocaml/xapi/xapi_globs.ml @@ -980,10 +980,6 @@ let other_options = [ "modprobe_path", Arg.Set_string modprobe_path, (fun () -> !modprobe_path), "Location of the modprobe(8) command: should match $(which modprobe)"; - - "db_idempotent_map", Arg.Set Db_globs.idempotent_map, - (fun () -> string_of_bool !Db_globs.idempotent_map), "True if the add_to_ API calls should be idempotent"; - ] let all_options = options_of_xapi_globs_spec @ other_options diff --git a/ocaml/xapi/xapi_guest_agent.ml b/ocaml/xapi/xapi_guest_agent.ml index 223033dcf0e..15e681fe5ba 100644 --- a/ocaml/xapi/xapi_guest_agent.ml +++ b/ocaml/xapi/xapi_guest_agent.ml @@ -66,17 +66,6 @@ let extend base str = Printf.sprintf "%s/%s" base str * attr/eth0/ip -> 0/ip * attr/eth0/ipv6/0/addr -> 0/ipv6/0 * attr/eth0/ipv6/1/addr -> 0/ipv6/1 - * - * Example output on new xenstore protocol: - * attr/vif/0/ipv4/0 -> 0/ipv4/0 - * attr/vif/0/ipv4/1 -> 0/ipv4/1 - * attr/vif/0/ipv6/0 -> 0/ipv6/0 - * attr/vif/0/ipv6/1 -> 
0/ipv6/1 - * - * For the compatibility of XAPI clients, outputs of both protocols - * will be generated. I.E. - * attr/eth0/ip -> 0/ip; 0/ipv4/0 - * attr/vif/0/ipv4/0 -> 0/ip; 0/ipv4/0 * *) let networks path (list: string -> string list) = (* Find all ipv6 addresses under a path. *) @@ -87,11 +76,10 @@ let networks path (list: string -> string list) = (* Find the ipv4 address under a path, and the ipv6 addresses if they exist. *) let find_all_ips path prefix = let ipv4 = (extend path "ip", extend prefix "ip") in - let ipv4_with_idx = (extend path "ip", extend prefix "ipv4/0") in if List.mem "ipv6" (list path) then - ipv4 :: (ipv4_with_idx :: (find_ipv6 (extend path "ipv6") (extend prefix "ipv6"))) + ipv4 :: (find_ipv6 (extend path "ipv6") (extend prefix "ipv6")) else - [ipv4; ipv4_with_idx] + [ipv4] in (* Find all "ethn", "xenbrn" or newer interface standard names * [see https://www.freedesktop.org/wiki/Software/systemd/PredictableNetworkInterfaceNames/] @@ -124,49 +112,10 @@ let networks path (list: string -> string list) = | Some pair -> pair :: acc ) [] (list path) in - let find_vifs vif_path = - let extract_vif acc vif_id = ((extend vif_path vif_id), vif_id) :: acc in - List.fold_left extract_vif [] (list vif_path) - in - let cmp a b = - try - compare (int_of_string a) (int_of_string b) - with Failure _ -> - error "String (\"%s\" or \"%s\") can't be converted into an integer as index of IP" a b; - raise (Failure "Failed to compare") - in - let find_all_vif_ips vif_path vif_id = - (* vif_path: attr/vif/0 *) - (* vif_id: 0 *) - let extract_ip_ver vif_id acc ip_ver = - let ip_addr_ids = list (extend vif_path ip_ver) in - let extract_ip_addr vif_id ip_ver acc ip_addr_id = - let key_left = Printf.sprintf "%s/%s/%s" vif_path ip_ver ip_addr_id in - let key_right = Printf.sprintf "%s/%s/%s" vif_id ip_ver ip_addr_id in - match acc with - | [] when ip_ver = "ipv4" -> - [(key_left, (extend vif_id "ip")); (key_left, key_right)] - | _ -> (key_left, key_right) :: acc - in - 
try - (List.fold_left (extract_ip_addr vif_id ip_ver) [] (List.stable_sort cmp ip_addr_ids)) @ acc - with Failure _ -> - error "Failed to extract IP address for vif %s." vif_id; - [] - in - let ip_vers = List.filter (fun a -> a = "ipv4" || a = "ipv6") (list vif_path) in - List.fold_left (extract_ip_ver vif_id) [] ip_vers - in - match find_vifs (extend path "vif") with - | [] -> - path - |> find_eths - |> List.map (fun (path, prefix) -> find_all_ips path prefix) - |> List.concat - | vif_pair_list -> - vif_pair_list - |> List.map (fun (vif_path, vif_id) -> find_all_vif_ips vif_path vif_id) - |> List.concat + path + |> find_eths + |> List.map (fun (path, prefix) -> find_all_ips path prefix) + |> List.concat (* One key is placed in the other map per control/* key in xenstore. This catches keys like "feature-shutdown" "feature-hibernate" "feature-reboot" diff --git a/ocaml/xapi/xapi_ha.ml b/ocaml/xapi/xapi_ha.ml index 8ee2c57b8ea..245889f7ad2 100644 --- a/ocaml/xapi/xapi_ha.ml +++ b/ocaml/xapi/xapi_ha.ml @@ -11,11 +11,11 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. *) - -(* Functions for implementing 'High Availability' (HA). File is divided into 3 sections: - + scripts and functions which form part of the HA subsystem interface - + internal API calls used for arming and disarming individual hosts - + external API calls (Pool.enable_ha, Pool.disable_ha) used for turning on/off HA pool-wide +(** Functions for implementing 'High Availability' (HA). 
File is divided into 3 sections: + + scripts and functions which form part of the HA subsystem interface + + internal API calls used for arming and disarming individual hosts + + external API calls (Pool.enable_ha, Pool.disable_ha) used for turning on/off HA pool-wide + * @group High Availability (HA) *) module D = Debug.Make(struct let name="xapi_ha" end) @@ -218,6 +218,7 @@ module Timeouts = struct end module Monitor = struct + (** Control the background HA monitoring thread *) let request_shutdown = ref false let prevent_failover_actions_until = ref 0. (* protected by the request_shutdown_m too *) @@ -235,9 +236,11 @@ module Monitor = struct let database_state_valid = ref false let database_state_valid_c = Condition.create () + (* Used to explicitly signal that we should replan *) let plan_out_of_date = ref true exception Already_started + exception Not_started (** Background thread which monitors the membership set and takes action if HA is armed and something goes wrong *) @@ -621,6 +624,7 @@ module Monitor = struct end +(** Called by MTC in Orlando Update 1 to temporarily block the VM restart thread. *) let ha_prevent_restarts_for __context seconds = (* Even if HA is not enabled, this should still go ahead (rather than doing * a successful no-op) in case HA is about to be enabled within the specified @@ -678,6 +682,14 @@ let redo_log_ha_enabled_at_startup () = (* ----------------------------- *) +(** Called when xapi restarts: server may be in emergency mode at this point. We need + to inspect the local configuration and if HA is supposed to be armed we need to + set everything up. + Note that + the master shouldn't be able to activate HA while we are offline since that would cause + us to come up with a broken configuration (the enable-HA stage has the critical task of + synchronising the HA configuration on all the hosts). So really we only want to notice + if the Pool has had HA disabled while we were offline. 
*) let on_server_restart () = let armed = bool_of_string (Localdb.get Constants.ha_armed) in @@ -771,6 +783,9 @@ let on_server_restart () = (* We signal the monitor that the database state is valid (wrt liveness + disabledness of hosts) later *) end +(** Called in the master xapi startup when the database is ready. We set all hosts (including this one) to + disabled then signal the monitor thread to look. It can then wait for slaves to turn up + before trying to restart VMs. *) let on_database_engine_ready () = info "Setting all hosts to dead and disabled. Hosts must re-enable themselves explicitly"; Server_helpers.exec_with_new_task "Setting all hosts to dead and disabled" @@ -788,6 +803,8 @@ let on_database_engine_ready () = (*********************************************************************************************) (* Internal API calls to configure individual hosts *) +(** Internal API call to prevent this node making an unsafe failover decision. + This call is idempotent. *) let ha_disable_failover_decisions __context localhost = debug "Disabling failover decisions"; (* FIST *) @@ -797,6 +814,10 @@ let ha_disable_failover_decisions __context localhost = end; Localdb.put Constants.ha_disable_failover_decisions "true" +(** Internal API call to disarm localhost. + If the daemon is missing then we return success. Either fencing was previously disabled and the + daemon has shutdown OR the daemon has died and this node will fence shortly... +*) let ha_disarm_fencing __context localhost = try let (_ : string) = call_script ha_disarm_fencing [] in () @@ -806,10 +827,13 @@ let ha_disarm_fencing __context localhost = let ha_set_excluded __context localhost = let (_ : string) = call_script ha_set_excluded [] in () +(** Internal API call to stop the HA daemon. + This call is idempotent. 
*) let ha_stop_daemon __context localhost = Monitor.stop (); let (_ : string) = call_script ha_stop_daemon [] in () +(** Emergency-mode API call to disarm localhost *) let emergency_ha_disable __context soft = let ha_armed = try bool_of_string (Localdb.get Constants.ha_armed) with _ -> false in if not ha_armed then @@ -838,6 +862,9 @@ let emergency_ha_disable __context soft = if not soft then Localdb.put Constants.ha_armed "false"; end +(** Internal API call to release any HA resources after the system has + been shutdown. This call is idempotent. Modified for CA-48539 to + call vdi.deactivate before vdi.detach. *) let ha_release_resources __context localhost = Monitor.stop (); @@ -864,6 +891,10 @@ let ha_release_resources __context localhost = (* At this point a restart won't enable the HA subsystem *) Localdb.put Constants.ha_armed "false" +(** Internal API call which blocks until this node's xHA daemon spots the invalid statefile + and exits cleanly. If the daemon survives but the statefile access is lost then this function + will return an exception and the no-statefile shutdown can be attempted. 
+*) let ha_wait_for_shutdown_via_statefile __context localhost = try while true do @@ -961,6 +992,7 @@ let write_config_file ~__context statevdi_paths generation = (Xha_interface.DaemonConfiguration.to_xml_string config); debug "%s file written" Xha_interface.DaemonConfiguration.filename +(** Internal API call to preconfigure localhost *) let preconfigure_host __context localhost statevdis metadata_vdi generation = info "Host.preconfigure_ha host = %s; statevdis = [ %s ]; generation = %s" (Ref.string_of localhost) (String.concat "; " (List.map Ref.string_of statevdis)) generation; @@ -1088,10 +1120,12 @@ let rec propose_new_master_internal ~__context ~address ~manual = proposed_master := Some address; proposed_master_time := Unix.gettimeofday () +(* First phase of a two-phase commit of a new master *) let propose_new_master ~__context ~address ~manual = Mutex.execute proposed_master_m (fun () -> propose_new_master_internal ~__context ~address ~manual) +(* Second phase of a two-phase commit of a new master *) let commit_new_master ~__context ~address = begin match !proposed_master with | Some x when x <> address -> @@ -1501,6 +1535,7 @@ let enable __context heartbeat_srs configuration = raise exn +(* Called before shutting down or rebooting a host *) let before_clean_shutdown_or_reboot ~__context ~host = let pool = Helpers.get_pool ~__context in if Db.Pool.get_ha_enabled ~__context ~self:pool then begin diff --git a/ocaml/xapi/xapi_ha.mli b/ocaml/xapi/xapi_ha.mli deleted file mode 100644 index 32d0a579c42..00000000000 --- a/ocaml/xapi/xapi_ha.mli +++ /dev/null @@ -1,116 +0,0 @@ -(* - * Copyright (C) 2017 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -(** Functions for implementing 'High Availability' (HA). - @group High Availability (HA) *) - -val ha_redo_log : Redo_log.redo_log -(** The redo log instance used for HA *) - -(******************************************************************************) -(** {2 Interface with the low-level HA subsystem} *) - -module Monitor : - sig - (** Control the background HA monitoring thread *) - - val plan_out_of_date : bool ref - (** Used to explicitly signal that we should replan *) - - val stop : unit -> unit - end - -val ha_prevent_restarts_for : 'a -> int64 -> unit -(** Called by MTC in Orlando Update 1 to temporarily block the VM restart thread. *) - -val on_server_restart : unit -> unit -(** Called when xapi restarts: server may be in emergency mode at this point. - We need to inspect the local configuration and if HA is supposed to be armed - we need to set everything up. - Note that the master shouldn't be able to activate HA while we are offline - since that would cause us to come up with a broken configuration (the - enable-HA stage has the critical task of synchronising the HA configuration - on all the hosts). So really we only want to notice if the Pool has had - HA disabled while we were offline. *) - -val on_database_engine_ready : unit -> unit -(** Called in the master xapi startup when the database is ready. We set all - hosts (including this one) to disabled, then signal the monitor thread to look. - It can then wait for slaves to turn up before trying to restart VMs. 
*) - -(******************************************************************************) -(** {2 Internal API calls to configure individual hosts} *) - -val ha_disable_failover_decisions : 'a -> 'b -> unit -(** Internal API call to prevent this node making an unsafe failover decision. - This call is idempotent. *) - -val ha_disarm_fencing : 'a -> 'b -> unit -(** Internal API call to disarm localhost. If the daemon is missing then we - return success. Either fencing was previously disabled and the daemon has - shutdown OR the daemon has died and this node will fence shortly... -*) - -val ha_stop_daemon : 'a -> 'b -> unit -(** Internal API call to stop the HA daemon. This call is idempotent. *) - -val emergency_ha_disable : 'a -> bool -> unit -(** Emergency-mode API call to disarm localhost *) - -val ha_release_resources : Context.t -> 'a -> unit -(** Internal API call to release any HA resources after the system has been - shutdown. This call is idempotent. Modified for CA-48539 to call - vdi.deactivate before vdi.detach. *) - -val ha_wait_for_shutdown_via_statefile : 'a -> 'b -> unit -(** Internal API call which blocks until this node's xHA daemon spots the - invalid statefile and exits cleanly. If the daemon survives but the - statefile access is lost then this function will return an exception and - the no-statefile shutdown can be attempted. 
-*) - -val preconfigure_host : - Context.t -> - [ `host ] API.Ref.t -> - [ `VDI ] API.Ref.t list -> - [ `VDI ] API.Ref.t -> - string -> unit -(** Internal API call to preconfigure localhost *) - -val join_liveset : 'a -> 'b Ref.t -> unit - -val propose_new_master : __context:'a -> address:string -> manual:'b -> unit -(** First phase of a two-phase commit of a new master *) - -val commit_new_master : __context:Context.t -> address:string -> unit -(** Second phase of a two-phase commit of a new master *) - -val abort_new_master : __context:'a -> address:string -> unit - -(******************************************************************************) -(** {2 External API calls} *) - -(** {3 Pool.*_ha API calls} *) - -val disable : Context.t -> unit - -val enable : - Context.t -> [ `SR ] API.Ref.t list -> (string * string) list -> unit - -(** {3 Functions called by host.* API calls} *) - -val before_clean_shutdown_or_reboot : __context:Context.t -> host:'a -> unit -(** Called before shutting down or rebooting a host - (called by the host.shutdown, host.reboot API functions). 
*) - diff --git a/ocaml/xapi/xapi_mgmt_iface.ml b/ocaml/xapi/xapi_mgmt_iface.ml index cddd1f2c9c1..9f268173f0e 100644 --- a/ocaml/xapi/xapi_mgmt_iface.ml +++ b/ocaml/xapi/xapi_mgmt_iface.ml @@ -167,6 +167,15 @@ let wait_for_management_ip ~__context = done; end); !ip +let update_getty () = + (* Running update-issue service on best effort basis *) + try + ignore (Forkhelpers.execute_command_get_output !Xapi_globs.update_issue_script []); + ignore (Forkhelpers.execute_command_get_output !Xapi_globs.kill_process_script ["-q"; "-HUP"; "mingetty"; "agetty"]) + with e -> + debug "update_getty at %s caught exception: %s" + __LOC__ (Printexc.to_string e) + let on_dom0_networking_change ~__context = debug "Checking to see if hostname or management IP has changed"; (* Need to update: @@ -185,19 +194,17 @@ let on_dom0_networking_change ~__context = Db.Host.set_name_label ~__context ~self:localhost ~value:new_hostname; begin match Helpers.get_management_ip_addr ~__context with | Some ip -> - (* WARNING: this does NOT detect IP address changes that happen before - xapi's startup (see CA-242706) *) if Db.Host.get_address ~__context ~self:localhost <> ip then begin debug "Changing Host.address in database to: %s" ip; Db.Host.set_address ~__context ~self:localhost ~value:ip; debug "Refreshing console URIs"; - Helpers.update_getty (); + update_getty (); Dbsync_master.refresh_console_urls ~__context end | None -> if Db.Host.get_address ~__context ~self:localhost <> "" then begin debug "Changing Host.address in database to: '' (host has no management IP address)"; - Helpers.update_getty (); + update_getty (); Db.Host.set_address ~__context ~self:localhost ~value:"" end end; diff --git a/ocaml/xapi/xapi_pool.ml b/ocaml/xapi/xapi_pool.ml index 2f5054d9438..6b3c20525e5 100644 --- a/ocaml/xapi/xapi_pool.ml +++ b/ocaml/xapi/xapi_pool.ml @@ -245,21 +245,21 @@ let pre_join_checks ~__context ~rpc ~session_id ~force = let master_ref = get_master rpc session_id in let master = 
Client.Host.get_record ~rpc ~session_id ~self:master_ref in - (* Check software version, but as of CA-249786 don't check the build number*) + (* Check software version *) let get_software_version_fields fields = let open Xapi_globs in begin try List.assoc _platform_version fields with _ -> "" end, begin match get_compatibility_name fields with Some x -> x | None -> "" end, + begin try List.assoc _build_number fields with _ -> "" end, begin try List.assoc _git_id fields with _ -> "" end, begin try if List.mem_assoc linux_pack_vsn_key fields then "installed" else "not present" with _ -> "not present" end in - - let print_software_version (version,name,id,linux_pack) = - debug "version:%s, name:%s, id:%s, linux_pack:%s" version name id linux_pack in + let print_software_version (version,name,number,id,linux_pack) = + debug "version:%s, name:%s, build:%s, id:%s, linux_pack:%s" version name number id linux_pack in let master_software_version = master.API.host_software_version in let my_software_version = Db.Host.get_software_version ~__context ~self:me in diff --git a/ocaml/xapi/xapi_pool_patch.ml b/ocaml/xapi/xapi_pool_patch.ml index 8cd76cb688d..80c35c5ec7b 100644 --- a/ocaml/xapi/xapi_pool_patch.ml +++ b/ocaml/xapi/xapi_pool_patch.ml @@ -52,21 +52,9 @@ let pool_patch_upload_handler (req: Http.Request.t) s _ = is to avoid our task being prematurely marked as completed by the import_raw_vdi handler. 
*) let strip = List.filter (fun (k,v) -> k <> "task_id") in - let add_sr query = - match Importexport.sr_of_req ~__context req with - | Some _ -> query (* There was already an SR specified *) - | None -> - let pool = Db.Pool.get_all ~__context |> List.hd in - let default_SR = Db.Pool.get_default_SR ~__context ~self:pool in - ("sr_id",Ref.string_of default_SR)::query - in let subtask = Client.Task.create rpc session_id "VDI upload" "" in Stdext.Pervasiveext.finally (fun () -> - let req = Http.Request.{ - req with - cookie = strip req.cookie; - query = ("task_id",Ref.string_of subtask) :: strip req.query |> add_sr; - } in + let req = Http.Request.{req with cookie = strip req.cookie; query = ("task_id",Ref.string_of subtask) :: strip req.query} in let vdi_opt = Import_raw_vdi.localhost_handler rpc session_id (Importexport.vdi_of_req ~__context req) req s in match vdi_opt with | Some vdi -> diff --git a/ocaml/xapi/xapi_pool_update.ml b/ocaml/xapi/xapi_pool_update.ml index 2aa78835c22..4aaabf47358 100644 --- a/ocaml/xapi/xapi_pool_update.ml +++ b/ocaml/xapi/xapi_pool_update.ml @@ -190,7 +190,6 @@ let create_yum_config ~__context ~self ~url = ; Printf.sprintf "name=%s" name_label ; Printf.sprintf "baseurl=%s" url ; if signed then Printf.sprintf ("gpgkey=file:///etc/pki/rpm-gpg/%s") key else "" - ; "" (* Newline at the end of the file *) ] let attach_helper ~__context ~uuid ~vdi = @@ -467,12 +466,9 @@ let resync_host ~__context ~host = List.iter (fun update_ref -> let pool_patch_ref = Xapi_pool_patch.pool_patch_of_update ~__context update_ref in - let uuid = Db.Pool_update.get_uuid ~__context ~self:update_ref in - let mtime = (Unix.stat (Filename.concat update_applied_dir uuid)).Unix.st_mtime in - Xapi_pool_patch.write_patch_applied_db ~__context ~date:mtime ~self:pool_patch_ref ~host () + Xapi_pool_patch.write_patch_applied_db ~__context ~self:pool_patch_ref ~host () ) update_refs; - Create_misc.create_updates_requiring_reboot_info ~__context ~host; - 
Create_misc.create_software_version ~__context + Create_misc.create_updates_requiring_reboot_info ~__context ~host end else Db.Host.set_updates ~__context ~self:host ~value:[]; @@ -499,10 +495,7 @@ let resync_host ~__context ~host = && Xapi_pool_patch.pool_patch_of_update ~__context self |> fun self -> Db.Pool_patch.get_host_patches ~__context ~self |> function [] -> false | _ -> true) - |> List.iter (fun self -> destroy ~__context ~self); - - (* Clean up host_patch table *) - Db_gc_util.gc_Host_patches ~__context + |> List.iter (fun self -> destroy ~__context ~self) end let pool_update_download_handler (req: Request.t) s _ = diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index f15d22194aa..c31f8d7fedf 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -1,5 +1,5 @@ (* - * Copyright (C) 2006-2017 Citrix Systems Inc. + * Copyright (C) 2006-2009 Citrix Systems Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published @@ -11,7 +11,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. 
*) -(* Module that defines API functions for VDI objects +(** Module that defines API functions for VDI objects * @group XenAPI functions *) @@ -25,32 +25,8 @@ open Printf (**************************************************************************************) (* current/allowed operations checking *) -let check_sm_feature_error (op:API.vdi_operations) sm_features sr = - let required_sm_feature = Smint.(match op with - | `forget - | `snapshot - | `copy - | `scan - | `force_unlock - | `blocked - -> None - | `destroy -> Some Vdi_delete - | `resize -> Some Vdi_resize - | `update -> Some Vdi_update - | `resize_online -> Some Vdi_resize_online - | `generate_config -> Some Vdi_generate_config - | `clone -> Some Vdi_clone - | `mirror -> Some Vdi_mirror - ) in - match required_sm_feature with - | None -> None - | Some feature -> - if Smint.(has_capability feature sm_features) - then None - else Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) - -(* Checks to see if an operation is valid in this state. Returns Some exception - if not and None if everything is ok. *) +(** Checks to see if an operation is valid in this state. Returns Some exception + if not and None if everything is ok. 
*) let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_records=[]) ha_enabled record _ref' op = let _ref = Ref.string_of _ref' in let current_ops = record.Db_actions.vDI_current_operations in @@ -137,6 +113,8 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re (* NB RO vs RW sharing checks are done in xapi_vbd.ml *) + let sm_features = Xapi_sr_operations.features_of_sr_internal ~__context ~_type:sr_type in + let blocked_by_attach = if operation_can_be_performed_live then false @@ -150,11 +128,6 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re then Some (Api_errors.vdi_in_use,[_ref; (Record_util.vdi_operation_to_string op)]) else if my_has_current_operation_vbd_records <> [] then Some (Api_errors.other_operation_in_progress, [ "VDI"; _ref ]) - else - let sm_features = Xapi_sr_operations.features_of_sr_internal ~__context ~_type:sr_type in - let sm_feature_error = check_sm_feature_error op sm_features sr in - if sm_feature_error <> None - then sm_feature_error else ( match op with | `forget -> @@ -178,17 +151,32 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re then Some (Api_errors.ha_enable_in_progress, []) else if List.mem record.Db_actions.vDI_type [`ha_statefile; `metadata ] && Xapi_pool_helpers.ha_disable_in_progress ~__context then Some (Api_errors.ha_disable_in_progress, []) + else + if not Smint.(has_capability Vdi_delete sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) else None | `resize -> if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] then Some (Api_errors.ha_is_enabled, []) + else + if not Smint.(has_capability Vdi_resize sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `update -> + if not Smint.(has_capability Vdi_update sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) 
else None | `resize_online -> if ha_enabled && List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] then Some (Api_errors.ha_is_enabled, []) + else + if not Smint.(has_capability Vdi_resize_online sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `generate_config -> + if not Smint.(has_capability Vdi_generate_config sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) else None - | `snapshot when not Smint.(has_capability Vdi_snapshot sm_features) -> - Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) | `snapshot when record.Db_actions.vDI_sharable -> Some (Api_errors.vdi_is_sharable, [ _ref ]) | `snapshot when reset_on_boot -> @@ -201,7 +189,15 @@ let check_operation_error ~__context ?(sr_records=[]) ?(pbd_records=[]) ?(vbd_re if List.mem record.Db_actions.vDI_type [ `ha_statefile; `redo_log ] then Some (Api_errors.operation_not_allowed, ["VDI containing HA statefile or redo log cannot be copied (check the VDI's allowed operations)."]) else None - | `mirror | `clone | `generate_config | `scan | `force_unlock | `blocked | `update -> None + | `clone -> + if not Smint.(has_capability Vdi_clone sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | `mirror -> + if not Smint.(has_capability Vdi_mirror sm_features) + then Some (Api_errors.sr_operation_not_supported, [Ref.string_of sr]) + else None + | _ -> None ) let assert_operation_valid ~__context ~self ~(op:API.vdi_operations) = @@ -227,6 +223,15 @@ let update_allowed_operations_internal ~__context ~self ~sr_records ~pbd_records let update_allowed_operations ~__context ~self : unit = update_allowed_operations_internal ~__context ~self ~sr_records:[] ~pbd_records:[] ~vbd_records:[] +(** Someone is cancelling a task so remove it from the current_operations *) +let cancel_task ~__context ~self ~task_id = + let all = List.map fst (Db.VDI.get_current_operations 
~__context ~self) in + if List.mem task_id all then + begin + Db.VDI.remove_from_current_operations ~__context ~self ~key:task_id; + update_allowed_operations ~__context ~self + end + let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = let ops = Db.VDI.get_current_operations ~__context ~self in let set = (fun value -> Db.VDI.set_current_operations ~__context ~self ~value) in @@ -234,7 +239,7 @@ let cancel_tasks ~__context ~self ~all_tasks_in_db ~task_ids = (**************************************************************************************) -(* Helper function to create a new VDI record with all fields copied from +(** Helper function to create a new VDI record with all fields copied from an original, except ref and *_operations, UUID and others supplied as optional arguments. If a new UUID is not supplied, a fresh one is generated. storage_lock defaults to false. diff --git a/ocaml/xapi/xapi_vdi.mli b/ocaml/xapi/xapi_vdi.mli deleted file mode 100644 index 127008fa3ea..00000000000 --- a/ocaml/xapi/xapi_vdi.mli +++ /dev/null @@ -1,204 +0,0 @@ -(* - * Copyright (C) 2017 Citrix Systems Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; version 2.1 only. with the special - * exception on linking described in file LICENSE. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU Lesser General Public License for more details. - *) - -(** Module that defines API functions for VDI objects - * @group XenAPI functions -*) - -(** Checks to see if an operation is valid in this state. Returns Some exception - if not and None if everything is ok. 
*) -(* Exposed here only for use by Test_vdi_allowed_operations, this - declaration also serves to annotate the (op:API.vdi_operations) - parameter. *) -val check_operation_error : - __context:Context.t -> - ?sr_records:'a list -> - ?pbd_records:('b API.Ref.t * API.pBD_t) list -> - ?vbd_records:('c * Db_actions.vBD_t) list -> - bool -> - Db_actions.vDI_t -> - API.ref_VDI -> - API.vdi_operations -> - (string * string list) option - -val assert_operation_valid : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> op:API.vdi_operations -> unit - -val update_allowed_operations_internal : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> - sr_records:'a list -> - pbd_records:('b API.Ref.t * API.pBD_t) list -> - vbd_records:('c * Db_actions.vBD_t) list -> unit - -val update_allowed_operations : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> unit - -val cancel_tasks : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> - all_tasks_in_db:'a Ref.t list -> task_ids:string list -> unit - -val create : - __context:Context.t -> - name_label:string -> - name_description:string -> - sR:[ `SR ] API.Ref.t -> - virtual_size:int64 -> - _type:API.vdi_type -> - sharable:bool -> - read_only:bool -> - other_config:(string * string) list -> - xenstore_data:(string * string) list -> - sm_config:(string * string) list -> tags:string list -> [ `VDI ] API.Ref.t - -val pool_introduce : - __context:Context.t -> - uuid:string -> - name_label:string -> - name_description:string -> - sR:[ `SR ] API.Ref.t -> - _type:API.vdi_type -> - sharable:bool -> - read_only:bool -> - other_config:(string * string) list -> - location:string -> - xenstore_data:(string * string) list -> - sm_config:(string * string) list -> - managed:bool -> - virtual_size:int64 -> - physical_utilisation:int64 -> - metadata_of_pool:[ `pool ] API.Ref.t -> - is_a_snapshot:bool -> - snapshot_time:API.Date.iso8601 -> - snapshot_of:[ `VDI ] API.Ref.t -> [ `VDI ] Ref.t - -val db_introduce : - __context:Context.t -> - 
uuid:string -> - name_label:string -> - name_description:string -> - sR:[ `SR ] API.Ref.t -> - _type:API.vdi_type -> - sharable:bool -> - read_only:bool -> - other_config:(string * string) list -> - location:string -> - xenstore_data:(string * string) list -> - sm_config:(string * string) list -> - managed:bool -> - virtual_size:int64 -> - physical_utilisation:int64 -> - metadata_of_pool:[ `pool ] API.Ref.t -> - is_a_snapshot:bool -> - snapshot_time:API.Date.iso8601 -> - snapshot_of:[ `VDI ] API.Ref.t -> [ `VDI ] Ref.t - -val db_forget : __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> unit - -val introduce : - __context:Context.t -> - uuid:string -> - name_label:string -> - name_description:string -> - sR:[ `SR ] API.Ref.t -> - _type:API.vdi_type -> - sharable:bool -> - read_only:'a -> - other_config:(string * string) list -> - location:string -> - xenstore_data:(string * string) list -> - sm_config:(string * string) list -> - managed:'b -> - virtual_size:'c -> - physical_utilisation:'d -> - metadata_of_pool:'e -> - is_a_snapshot:'f -> - snapshot_time:'g -> snapshot_of:'h -> [ `VDI ] API.Ref.t -val update : __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> unit -val forget : __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> unit - -(** driver_params is the storage-backend-specific parameters that are used to drive the - snapshot operation (e.g. 
vmhint for NetAPP) -*) -val snapshot_and_clone : - (dbg:string -> - sr:string -> - vdi_info:Storage_interface.vdi_info -> Storage_interface.vdi_info) -> - __context:Context.t -> - vdi:[ `VDI ] API.Ref.t -> - driver_params:(string * string) list -> [ `VDI ] API.Ref.t - -val snapshot : - __context:Context.t -> - vdi:[ `VDI ] API.Ref.t -> - driver_params:(string * string) list -> [ `VDI ] API.Ref.t -val destroy : __context:Context.t -> self:[ `VDI ] API.Ref.t -> unit -val resize_online : - __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> size:int64 -> unit -val resize : - __context:Context.t -> vdi:[ `VDI ] API.Ref.t -> size:int64 -> unit -val generate_config : - __context:Context.t -> host:'a -> vdi:[ `VDI ] API.Ref.t -> string -val clone : - __context:Context.t -> - vdi:[ `VDI ] API.Ref.t -> - driver_params:(string * string) list -> [ `VDI ] API.Ref.t -val copy : - __context:Context.t -> - vdi:[ `VDI ] API.Ref.t -> - sr:'a Ref.t -> - base_vdi:API.ref_VDI -> - into_vdi:[ `VDI ] API.Ref.t Client.Id.t -> [ `VDI ] API.Ref.t Client.Id.t -val force_unlock : __context:'a -> vdi:'b -> 'c -val set_sharable : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit -val set_managed : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit -val set_read_only : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit -val set_missing : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit -val set_virtual_size : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:int64 -> unit -val set_physical_utilisation : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:int64 -> unit -val set_is_a_snapshot : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit -val set_snapshot_of : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> value:[ `VDI ] API.Ref.t -> unit -val set_snapshot_time : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> value:API.Date.iso8601 -> unit -val 
set_metadata_of_pool : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> value:[ `pool ] API.Ref.t -> unit -val set_on_boot : - __context:Context.t -> - self:[ `VDI ] API.Ref.t -> value:[< `persist | `reset > `persist ] -> unit -val set_allow_caching : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:bool -> unit -val set_name_label : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:string -> unit -val set_name_description : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> value:string -> unit -val checksum : __context:Context.t -> self:API.ref_VDI -> string - -(** Open a foreign database on a VDI *) -val open_database : - __context:Context.t -> self:[ `VDI ] API.Ref.t -> API.ref_session - -val read_database_pool_uuid : __context:'a -> self:API.ref_VDI -> string diff --git a/ocaml/xapi/xapi_vm_helpers.ml b/ocaml/xapi/xapi_vm_helpers.ml index afff11174f7..3ea36ef7919 100644 --- a/ocaml/xapi/xapi_vm_helpers.ml +++ b/ocaml/xapi/xapi_vm_helpers.ml @@ -647,10 +647,6 @@ let choose_host_uses_wlb ~__context = ~self:(Helpers.get_pool ~__context))) -(* This is a stub used in the pattern_matching below to silence a - * warning in the newer ocaml compilers *) -exception Float_of_string_failure - (** Given a virtual machine, returns a host it can boot on, giving *) (** priority to an affinity host if one is present. WARNING: called *) (** while holding the global lock from the message forwarding layer. 
*) @@ -666,9 +662,8 @@ let choose_host_for_vm ~__context ~vm ~snapshot = | ["WLB"; "0.0"; rec_id; zero_reason] -> filter_and_convert tl | ["WLB"; stars; rec_id] -> - let st = try float_of_string stars with Failure _ -> raise Float_of_string_failure - in - (h, st, rec_id) :: filter_and_convert tl + (h, float_of_string stars, rec_id) + :: filter_and_convert tl | _ -> filter_and_convert tl end | [] -> [] @@ -733,7 +728,7 @@ let choose_host_for_vm ~__context ~vm ~snapshot = with _ -> () end; choose_host_for_vm_no_wlb ~__context ~vm ~snapshot - | Float_of_string_failure -> + | Failure "float_of_string" -> debug "Star ratings from wlb could not be parsed to floats. \ Using original algorithm"; choose_host_for_vm_no_wlb ~__context ~vm ~snapshot diff --git a/ocaml/xapi/xapi_vm_lifecycle.ml b/ocaml/xapi/xapi_vm_lifecycle.ml index 11ac7dad722..63c724677ae 100644 --- a/ocaml/xapi/xapi_vm_lifecycle.ml +++ b/ocaml/xapi/xapi_vm_lifecycle.ml @@ -307,21 +307,16 @@ let is_mobile ~__context vm strict = && not @@ nested_virt ~__context vm metrics) || not strict -let maybe_get_guest_metrics ~__context ~ref = - if Db.is_valid_ref __context ref - then Some (Db.VM_guest_metrics.get_record_internal ~__context ~self:ref) - else None (** Take an internal VM record and a proposed operation. Return None iff the operation would be acceptable; otherwise Some (Api_errors., [list of strings]) corresponding to the first error found. Checking stops at the first error. The "strict" param sets whether we require feature-flags for ops that need guest support: ops in the suspend-like and shutdown-like categories. 
*) -let check_operation_error ~__context ~ref ~op ~strict = - let vmr = Db.VM.get_record_internal ~__context ~self:ref in - let vmgmr = maybe_get_guest_metrics ~__context ~ref:(vmr.Db_actions.vM_guest_metrics) in +let check_operation_error ~__context ~vmr ~vmgmr ~ref ~clone_suspended_vm_enabled ~vdis_reset_and_caching ~op ~strict = let ref_str = Ref.string_of ref in let power_state = vmr.Db_actions.vM_power_state in + let current_ops = vmr.Db_actions.vM_current_operations in let is_template = vmr.Db_actions.vM_is_a_template in let is_snapshot = vmr.Db_actions.vM_is_a_snapshot in @@ -342,7 +337,6 @@ let check_operation_error ~__context ~ref ~op ~strict = (* if other operations are in progress, check that the new operation is allowed concurrently with them. *) let current_error = check current_error (fun () -> - let current_ops = vmr.Db_actions.vM_current_operations in if List.length current_ops <> 0 && not (is_allowed_concurrently ~op ~current_ops) then report_concurrent_operations_error ~current_ops ~ref_str else None) in @@ -434,12 +428,6 @@ let check_operation_error ~__context ~ref ~op ~strict = (* Check for an error due to VDI caching/reset behaviour *) let current_error = check current_error (fun () -> - let vdis_reset_and_caching = List.filter_map (fun vbd -> - try - let vdi = Db.VBD.get_VDI ~__context ~self:vbd in - let sm_config = Db.VDI.get_sm_config ~__context ~self:vdi in - Some ((assoc_opt "on_boot" sm_config = Some "reset"), (bool_of_assoc "caching" sm_config)) - with _ -> None) vmr.Db_actions.vM_VBDs in if op = `checkpoint || op = `snapshot || op = `suspend || op = `snapshot_with_quiesce then (* If any vdi exists with on_boot=reset, then disallow checkpoint, snapshot, suspend *) if List.exists fst vdis_reset_and_caching @@ -490,8 +478,26 @@ let check_operation_error ~__context ~ref ~op ~strict = current_error +let maybe_get_guest_metrics ~__context ~ref = + if Db.is_valid_ref __context ref + then Some (Db.VM_guest_metrics.get_record_internal ~__context 
~self:ref) + else None + +let get_info ~__context ~self = + let all = Db.VM.get_record_internal ~__context ~self in + let gm = maybe_get_guest_metrics ~__context ~ref:(all.Db_actions.vM_guest_metrics) in + let clone_suspended_vm_enabled = Helpers.clone_suspended_vm_enabled ~__context in + let vdis_reset_and_caching = List.filter_map (fun vbd -> + try + let vdi = Db.VBD.get_VDI ~__context ~self:vbd in + let sm_config = Db.VDI.get_sm_config ~__context ~self:vdi in + Some ((assoc_opt "on_boot" sm_config = Some "reset"), (bool_of_assoc "caching" sm_config)) + with _ -> None) all.Db_actions.vM_VBDs in + all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching + let get_operation_error ~__context ~self ~op ~strict = - check_operation_error ~__context ~ref:self ~op ~strict + let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in + check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op strict let assert_operation_valid ~__context ~self ~op ~strict = match get_operation_error ~__context ~self ~op ~strict with @@ -499,8 +505,9 @@ let assert_operation_valid ~__context ~self ~op ~strict = | Some (a,b) -> raise (Api_errors.Server_error (a,b)) let update_allowed_operations ~__context ~self = + let all, gm, clone_suspended_vm_enabled, vdis_reset_and_caching = get_info ~__context ~self in let check accu op = - match check_operation_error ~__context ~ref:self ~op ~strict:true with + match check_operation_error __context all gm self clone_suspended_vm_enabled vdis_reset_and_caching op true with | None -> op :: accu | _ -> accu in diff --git a/ocaml/xapi/xapi_vm_migrate.ml b/ocaml/xapi/xapi_vm_migrate.ml index 7db8156cc2a..d65ee9dc3eb 100644 --- a/ocaml/xapi/xapi_vm_migrate.ml +++ b/ocaml/xapi/xapi_vm_migrate.ml @@ -619,8 +619,8 @@ let vdi_copy_fun __context dbg vdi_map remote is_intra_pool remote_vdis so_far t match mirror_id with | Some mid -> ignore(Storage_access.unregister_mirror mid); - let m = 
SMAPI.DATA.MIRROR.stat ~dbg ~id:mid in (try SMAPI.DATA.MIRROR.stop ~dbg ~id:mid with _ -> ()); + let m = SMAPI.DATA.MIRROR.stat ~dbg ~id:mid in m.Mirror.failed | None -> false in if mirror_failed then raise (Api_errors.Server_error(Api_errors.mirror_failed,[Ref.string_of vconf.vdi])) diff --git a/ocaml/xapi/xapi_xenops.ml b/ocaml/xapi/xapi_xenops.ml index 7bcfdf05c0e..05f6d61fe48 100644 --- a/ocaml/xapi/xapi_xenops.ml +++ b/ocaml/xapi/xapi_xenops.ml @@ -435,7 +435,7 @@ module MD = struct let rate = Int64.of_string (List.assoc "kbps" qos_params) in Some (rate, timeslice) with - | Failure _ (* int_of_string *) -> + | Failure "int_of_string" -> log_qos_failure "parameter \"kbps\" not an integer"; None | Not_found -> log_qos_failure "necessary parameter \"kbps\" not found"; None @@ -604,7 +604,8 @@ module MD = struct } with | Not_found -> failwith "Intel GVT-g settings not specified" - | Failure _ (* int_of_string *)-> failwith "Intel GVT-g settings invalid" + | Failure "int_of_string" -> + failwith "Intel GVT-g settings invalid" let of_mxgpu_vgpu ~__context vm vgpu = let open Vgpu in @@ -629,7 +630,8 @@ module MD = struct } with | Not_found -> failwith "AMD MxGPU settings not specified" - | Failure _ (* int_of_string *) -> failwith "AMD MxGPU settings invalid" + | Failure "int_of_string" -> + failwith "AMD MxGPU settings invalid" let vgpus_of_vm ~__context (vmref, vm) = let open Vgpu in diff --git a/opam b/opam index 7a4606eb86f..c20eafe352b 100644 --- a/opam +++ b/opam @@ -1,58 +1,39 @@ -opam-version: "1.2" -maintainer: "xen-api@lists.xen.org" -authors: [ "xen-api@lists.xen.org" ] -homepage: "https://github.com/xapi-project/xen-api" -bug-reports: "https://github.com/xapi-project/xen-api/issues" -dev-repo: "https://github.com/xapi-project/xen-api.git" +opam-version: "1" +maintainer: "dave.scott@citrix.com" build: [ - ["./configure"] + ["./configure" "--disable-warn-error" "--varpatchdir" "%{prefix}%/var/patch" "--optdir" "%{lib}%/xcp" "--plugindir" 
"%{lib}%/xcp/plugins" "--hooksdir" "%{prefix}%/etc/hook-scripts" "--xapiconf" "%{prefix}%/etc/xapi.conf" "--libexecdir" "%{lib}%/xcp/bin" "--scriptsdir" "%{lib}%/xcp/scripts" "--sharedir" "%{share}%/xcp" "--webdir" "%{share}%/xcp/web" "--cluster-stack-root" "%{lib}%/xcp/bin/cluster-stack" "--bindir" "%{bin}%" "--sbindir" "%{bin}%" "--etcdir" "%{prefix}%/etc"] [make] + ["install" "-m" "0755" "ocaml/xapi/xapi.opt" "%{bin}%/xapi"] ] -install: [ - ["oasis" "setup"] - ["ocaml" "setup.ml" "-install"] -] -build-test: [make "test"] -remove: [ - ["oasis" "setup"] - ["ocaml" "setup.ml" "-uninstall"] - ["ocamlfind" "remove" "xapi"] - ["ocamlfind" "remove" "xapi-client"] - ["ocamlfind" "remove" "xapi-cli-protocol"] - ["ocamlfind" "remove" "xapi-consts"] - ["ocamlfind" "remove" "xapi-datamodel"] - ["ocamlfind" "remove" "xapi-database"] - ["ocamlfind" "remove" "xapi-types"] -] +build-test: [make "test" ] +remove: ["rm" "%{bin}%/xapi"] depends: [ - "oasis" {build} - "ocamlfind" {build} + "ocamlfind" "xapi-test-utils" - "xapi-idl" + "xapi-idl" {>= "0.12.2"} "xapi-libs-transitional" "xen-api-client" "xapi-netdev" + "omake" "cdrom" "fd-send-recv" "xapi-forkexecd" - "vhd-format" + "libvhd" "nbd" "oclock" "ounit" "rpc" "ssl" - "xapi-stdext" + "xapi-stdext" {>= "0.13.0"} "xapi-tapctl" "xenctrl" "xenstore" "xapi-inventory" "tar-format" - "opasswd" + "opasswd" {>= "0.9.3"} "xapi-rrdd-plugin" - "pci" + "pci" {>= "0.2.0"} "sha" - "xapi-xenopsd" - "mustache" ] depexts: [ [["centos"] ["pam-devel"]] diff --git a/scripts/plugins/firewall-port b/scripts/plugins/firewall-port index fbd14fc2c9f..20b527b7575 100644 --- a/scripts/plugins/firewall-port +++ b/scripts/plugins/firewall-port @@ -5,19 +5,17 @@ set -e ################################################# -# Use this script to open/close port with specified -# protocol. +# Use this script to open/close port. 
# # Usage: -# ./firewall-port {open|close} port protocol +# ./firewall-port {open|close} port # ################################################# OP="$1" PORT="$2" -PROTOCOL="${3:-tcp}" CHAIN="xapi-INPUT" -RULE="-p $PROTOCOL -m conntrack --ctstate NEW -m $PROTOCOL --dport $PORT -j ACCEPT" +RULE="-p tcp -m conntrack --ctstate NEW -m tcp --dport $PORT -j ACCEPT" case "${OP}" in open) @@ -40,7 +38,7 @@ case "${OP}" in fi ;; *) - echo $"Usage: $0 {open|close} {port} {protocol}" 1>&2 + echo $"Usage: $0 {open|close} {port}" 1>&2 exit 1 ;; esac