From dff8996715f938f54c18daab550d460134670bc6 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 13:33:56 +0100 Subject: [PATCH 01/11] local-allocator: always round up to the next extent If we ask for 1 bytes we should use (1 + extent_size - 1) / extent_size i.e. we should allocate a whole extent even though it is more than what we asked for. Fixes #65 Signed-off-by: David Scott --- xenvm-local-allocator/local_allocator.ml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/xenvm-local-allocator/local_allocator.ml b/xenvm-local-allocator/local_allocator.ml index d0c2591..2555a44 100644 --- a/xenvm-local-allocator/local_allocator.ml +++ b/xenvm-local-allocator/local_allocator.ml @@ -399,13 +399,15 @@ let main config daemon socket journal fromLVM toLVM = | Some data_volume -> let sector_size = Int64.of_int sector_size in let current = Int64.mul sector_size (sizeof data_volume) in + let extent_b = Int64.mul sector_size extent_size in + (* NB: make sure we round up to the next extent *) let nr_extents = match action with | `Absolute x -> - Int64.(div (div (sub x current) sector_size) extent_size) + Int64.(div (add (sub x current) (sub extent_b 1L)) extent_b) | `IncreaseBy x -> - Int64.(div (div x sector_size) extent_size) in - if nr_extents < 0L then begin - error "Request for -ve number of extents"; + Int64.(div (add x extent_b) extent_b) in + if nr_extents <= 0L then begin + error "Request for %Ld (<= 0) segments" nr_extents; return () end else begin FreePool.remove nr_extents From d514b0541121d0e52bec73b98b99138818abd8e3 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 13:35:37 +0100 Subject: [PATCH 02/11] lvresize/extend: it is an error to shrink the volume via the local allocator We can shrink deactivated volumes as normal through the API. The local allocator can only allocate, so fail if shrinking is requested. Also avoid calling the local allocator to allocate 0 bytes. Signed-off-by: David Scott --- xenvm/lvresize.ml | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/xenvm/lvresize.ml b/xenvm/lvresize.ml index 605ef6c..0e26495 100644 --- a/xenvm/lvresize.ml +++ b/xenvm/lvresize.ml @@ -20,6 +20,8 @@ let lvresize copts live (vg_name,lv_opt) real_size percent_size = | Some info -> info.local_device (* If we've got a default, use that *) | None -> failwith "Need to know the local device!" in + let existing_size = Int64.(mul (mul 512L vg.Lvm.Vg.extent_size) (Lvm.Lv.size_in_extents lv)) in + let device_is_active = let name = Mapper.name_of vg lv in let all = Devmapper.ls () in @@ -49,17 +51,24 @@ let lvresize copts live (vg_name,lv_opt) real_size percent_size = Lwt_io.write_line oc (Sexplib.Sexp.to_string (ResizeRequest.sexp_of_t r)) >>= fun () -> Lwt_io.close oc in - match live, info with | true, Some { Xenvm_common.local_allocator_path = Some allocator } -> - if device_is_active - then resize_locally allocator - else resize_remotely () + if device_is_active then begin + match size with + | `Absolute size -> + (* The local allocator can only allocate. When in this state we cannot shrink: + deactivate the device first. 
*) + if size < existing_size + then failwith (Printf.sprintf "Existing size is %Ld: cannot decrease to %Ld" existing_size size); + if size = existing_size + then return () + else resize_locally allocator + | _ -> resize_locally allocator + end else resize_remotely () | _, _ -> (* safe to allocate remotely *) resize_remotely () ) - let live_arg = let doc = "Resize a live device using the local allocator" in Arg.(value & flag & info ["live"] ~doc) From b5dae3a870e9d01d1f837d2aaec45601b4f45e09 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 14:00:19 +0100 Subject: [PATCH 03/11] Local allocator is synchronous, with error reporting It will exit with 0 only if the resize has been successful. Otherwise it will print a diagnostic message to stderr and exit with non-zero. Fixes #66 Signed-off-by: David Scott --- _oasis | 2 +- idl/resizeResponse.ml | 13 +++++++++++++ xenvm-local-allocator/local_allocator.ml | 13 +++++++++++-- xenvm/lvresize.ml | 16 +++++++++++++++- 4 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 idl/resizeResponse.ml diff --git a/_oasis b/_oasis index 3c1701f..3c71b1c 100644 --- a/_oasis +++ b/_oasis @@ -13,7 +13,7 @@ Library xenvmidl CompiledObject: best Path: idl Findlibname: xenvmidl - Modules: Xenvm_interface, Xenvm_client, Log, Result, Errors, ResizeRequest + Modules: Xenvm_interface, Xenvm_client, Log, Result, Errors, ResizeRequest, ResizeResponse BuildDepends: rpclib, rpclib.syntax, sexplib, sexplib.syntax, lvm, cohttp.lwt, threads, mirage-block-unix, devmapper, bisect Executable "xenvmd" diff --git a/idl/resizeResponse.ml b/idl/resizeResponse.ml new file mode 100644 index 0000000..f5018b4 --- /dev/null +++ b/idl/resizeResponse.ml @@ -0,0 +1,13 @@ +open Sexplib.Std + +module T = struct + type t = + | Device_mapper_device_does_not_exist of string + | Request_for_no_segments of int64 + | Success + with sexp + (** Response from xenvm-local-allocator to xenvm *) +end + +include SexpToCstruct.Make(T) +include T diff --git a/xenvm-local-allocator/local_allocator.ml b/xenvm-local-allocator/local_allocator.ml index 2555a44..ad11041 100644 --- a/xenvm-local-allocator/local_allocator.ml +++ b/xenvm-local-allocator/local_allocator.ml @@ -395,7 +395,7 @@ let main config daemon socket journal fromLVM toLVM = (* Log this kind of error. 
This tapdisk may block but at least others will keep going *) error "Couldn't find device mapper device: %s" device; - return () + return (ResizeResponse.Device_mapper_device_does_not_exist device) | Some data_volume -> let sector_size = Int64.of_int sector_size in let current = Int64.mul sector_size (sizeof data_volume) in @@ -408,7 +408,7 @@ let main config daemon socket journal fromLVM toLVM = Int64.(div (add x extent_b) extent_b) in if nr_extents <= 0L then begin error "Request for %Ld (<= 0) segments" nr_extents; - return () + return (ResizeResponse.Request_for_no_segments nr_extents) end else begin FreePool.remove nr_extents >>= fun extents -> @@ -421,6 +421,8 @@ let main config daemon socket journal fromLVM toLVM = (* The operation is now in the journal *) wait () (* The operation is now complete *) + >>= fun () -> + return ResizeResponse.Success end ) ) in @@ -433,6 +435,8 @@ let main config daemon socket journal fromLVM toLVM = >>= fun device -> let r = { ResizeRequest.local_dm_name = device; action = `IncreaseBy 1L } in handler r + >>= fun resp -> + Lwt_io.write_line Lwt_io.stdout (Sexplib.Sexp.to_string_hum (ResizeResponse.sexp_of_t resp)) >>= fun () -> stdin () in debug "Creating Unix domain socket %s" config.Config.socket; @@ -448,11 +452,16 @@ let main config daemon socket journal fromLVM toLVM = Lwt_unix.accept s >>= fun (fd, _) -> let ic = Lwt_io.of_fd ~mode:Lwt_io.input fd in + let oc = Lwt_io.of_fd ~mode:Lwt_io.output ~close:return fd in (* read one line *) Lwt_io.read_line ic >>= fun message -> let r = ResizeRequest.t_of_sexp (Sexplib.Sexp.of_string message) in handler r + >>= fun resp -> + Lwt_io.write_line oc (Sexplib.Sexp.to_string (ResizeResponse.sexp_of_t resp)) + >>= fun () -> + Lwt_io.flush oc >>= fun () -> Lwt_io.close ic >>= fun () -> diff --git a/xenvm/lvresize.ml b/xenvm/lvresize.ml index 0e26495..8ecc408 100644 --- a/xenvm/lvresize.ml +++ b/xenvm/lvresize.ml @@ -50,7 +50,21 @@ let lvresize copts live (vg_name,lv_opt) real_size percent_size = let r = { ResizeRequest.local_dm_name = name; action = size } in Lwt_io.write_line oc (Sexplib.Sexp.to_string (ResizeRequest.sexp_of_t r)) >>= fun () -> - Lwt_io.close oc in + let ic = Lwt_io.of_fd ~mode:Lwt_io.input ~close:return s in + Lwt_io.read_line ic + >>= fun txt -> + let resp = ResizeResponse.t_of_sexp (Sexplib.Sexp.of_string txt) in + Lwt_io.close oc + >>= fun () -> + match resp with + | ResizeResponse.Device_mapper_device_does_not_exist name -> + Printf.fprintf stderr "Device mapper device does not exist: %s\n%!" name; + exit 1 + | ResizeResponse.Request_for_no_segments nr -> + Printf.fprintf stderr "Request for an illegal number of segments: %Ld\n%!" 
nr; + exit 2 + | ResizeResponse.Success -> + return () in match live, info with | true, Some { Xenvm_common.local_allocator_path = Some allocator } -> if device_is_active then begin From 1bac1e3c150d9b58adecaa6a6beba35c6ae13d8a Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 14:01:53 +0100 Subject: [PATCH 04/11] Regenerate OASIS Signed-off-by: David Scott --- idl/xenvmidl.mldylib | 3 ++- idl/xenvmidl.mllib | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/idl/xenvmidl.mldylib b/idl/xenvmidl.mldylib index 247029d..cbe8baf 100644 --- a/idl/xenvmidl.mldylib +++ b/idl/xenvmidl.mldylib @@ -1,9 +1,10 @@ # OASIS_START -# DO NOT EDIT (digest: d1d1ec0a025cbfc9d6a3157d3e0422d2) +# DO NOT EDIT (digest: 10ee93a04fc3f517b71619e8ea26d853) Xenvm_interface Xenvm_client Log Result Errors ResizeRequest +ResizeResponse # OASIS_STOP diff --git a/idl/xenvmidl.mllib b/idl/xenvmidl.mllib index 247029d..cbe8baf 100644 --- a/idl/xenvmidl.mllib +++ b/idl/xenvmidl.mllib @@ -1,9 +1,10 @@ # OASIS_START -# DO NOT EDIT (digest: d1d1ec0a025cbfc9d6a3157d3e0422d2) +# DO NOT EDIT (digest: 10ee93a04fc3f517b71619e8ea26d853) Xenvm_interface Xenvm_client Log Result Errors ResizeRequest +ResizeResponse # OASIS_STOP From 29d7335405ee26c82db31472a5075708ffe483e7 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 15:05:24 +0100 Subject: [PATCH 05/11] local allocator: acquire an exclusive lock to prevent multiple startups We create a lock file based on the Unix domain socket path to prevent a second instance starting up. Signed-off-by: David Scott --- _oasis | 2 +- idl/pidfile.ml | 14 ++++++++++++++ xenvm-local-allocator/local_allocator.ml | 2 ++ 3 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 idl/pidfile.ml diff --git a/_oasis b/_oasis index 3c71b1c..5291b3c 100644 --- a/_oasis +++ b/_oasis @@ -13,7 +13,7 @@ Library xenvmidl CompiledObject: best Path: idl Findlibname: xenvmidl - Modules: Xenvm_interface, Xenvm_client, Log, Result, Errors, ResizeRequest, ResizeResponse + Modules: Xenvm_interface, Xenvm_client, Log, Result, Errors, ResizeRequest, ResizeResponse, Pidfile BuildDepends: rpclib, rpclib.syntax, sexplib, sexplib.syntax, lvm, cohttp.lwt, threads, mirage-block-unix, devmapper, bisect Executable "xenvmd" diff --git a/idl/pidfile.ml b/idl/pidfile.ml new file mode 100644 index 0000000..a917ec3 --- /dev/null +++ b/idl/pidfile.ml @@ -0,0 +1,14 @@ +(* We wish to ensure at-most-one copy of the program is started *) + +let write_pid pidfile = + let txt = string_of_int (Unix.getpid ()) in + try + let fd = Unix.openfile pidfile [ Unix.O_WRONLY; Unix.O_CREAT ] 0o0644 in + Unix.lockf fd Unix.F_TLOCK (String.length txt); + let (_: int) = Unix.write fd txt 0 (String.length txt) in + () + with e -> + Printf.fprintf stderr "%s\n" (Printexc.to_string e); + Printf.fprintf stderr "The pidfile %s is locked: you cannot start the program twice!\n" pidfile; + Printf.fprintf stderr "If the process was shutdown cleanly then verify and remove the pidfile.\n%!"; + exit 1 diff --git a/xenvm-local-allocator/local_allocator.ml b/xenvm-local-allocator/local_allocator.ml index ad11041..1208096 100644 --- a/xenvm-local-allocator/local_allocator.ml +++ b/xenvm-local-allocator/local_allocator.ml @@ -287,6 +287,8 @@ let main config daemon socket journal fromLVM toLVM = fromLVM = (match fromLVM with None -> config.Config.fromLVM | Some x -> x); } in debug "Loaded configuration: %s" (Sexplib.Sexp.to_string_hum (Config.sexp_of_t config)); + Pidfile.write_pid (config.Config.socket ^ ".lock"); + 
if daemon then Lwt_daemon.daemonize (); let t = From c01601b6b9771c90a81efb6158ee5bcd92af411d Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 15:29:08 +0100 Subject: [PATCH 06/11] xenvmd: prevent multiple instances being started up If we are listening on a path, we create a lock file based on the path name. If we are listening on a TCP port, we rely on the bind being exclusive (although the TCP code is deprecated) Signed-off-by: David Scott --- xenvmd/xenvmd.ml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/xenvmd/xenvmd.ml b/xenvmd/xenvmd.ml index 70487b7..9ce3068 100644 --- a/xenvmd/xenvmd.ml +++ b/xenvmd/xenvmd.ml @@ -618,6 +618,13 @@ let run port sock_path config daemon = let config = { config with Config.listenPort = match port with None -> config.Config.listenPort | Some x -> x } in let config = { config with Config.listenPath = match sock_path with None -> config.Config.listenPath | Some x -> Some x } in if daemon then Lwt_daemon.daemonize (); + ( match config.Config.listenPath with + | None -> + (* don't need a lock file because we'll fail to bind to the port *) + () + | Some path -> + info "Writing pidfile to %s" path; + Pidfile.write_pid (path ^ ".lock") ); let t = info "Started with configuration: %s" (Sexplib.Sexp.to_string_hum (Config.sexp_of_t config)); VolumeManager.vgopen ~devices:config.Config.devices From 24f6f785458c2655854b0ac6db399e4a91cd7a90 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 15:30:11 +0100 Subject: [PATCH 07/11] local allocator: acquire lock file after daemonizing I suspect the daemonize code was closing the fd and releasing the lock. Fixes #67 Signed-off-by: David Scott --- xenvm-local-allocator/local_allocator.ml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xenvm-local-allocator/local_allocator.ml b/xenvm-local-allocator/local_allocator.ml index 1208096..d4de29b 100644 --- a/xenvm-local-allocator/local_allocator.ml +++ b/xenvm-local-allocator/local_allocator.ml @@ -287,10 +287,10 @@ let main config daemon socket journal fromLVM toLVM = fromLVM = (match fromLVM with None -> config.Config.fromLVM | Some x -> x); } in debug "Loaded configuration: %s" (Sexplib.Sexp.to_string_hum (Config.sexp_of_t config)); - Pidfile.write_pid (config.Config.socket ^ ".lock"); - if daemon then Lwt_daemon.daemonize (); + Pidfile.write_pid (config.Config.socket ^ ".lock"); + let t = Device.read_sector_size config.Config.devices >>= fun sector_size -> From bfdb82ddf8d38c60cfb75f157b8f58d1e3f44d74 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 15:35:55 +0100 Subject: [PATCH 08/11] Regenerate OASIS Signed-off-by: David Scott --- idl/xenvmidl.mldylib | 3 ++- idl/xenvmidl.mllib | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/idl/xenvmidl.mldylib b/idl/xenvmidl.mldylib index cbe8baf..79eca06 100644 --- a/idl/xenvmidl.mldylib +++ b/idl/xenvmidl.mldylib @@ -1,5 +1,5 @@ # OASIS_START -# DO NOT EDIT (digest: 10ee93a04fc3f517b71619e8ea26d853) +# DO NOT EDIT (digest: 57b376d9a9ea46b70acab11aab5f012c) Xenvm_interface Xenvm_client Log @@ -7,4 +7,5 @@ Result Errors ResizeRequest ResizeResponse +Pidfile # OASIS_STOP diff --git a/idl/xenvmidl.mllib b/idl/xenvmidl.mllib index cbe8baf..79eca06 100644 --- a/idl/xenvmidl.mllib +++ b/idl/xenvmidl.mllib @@ -1,5 +1,5 @@ # OASIS_START -# DO NOT EDIT (digest: 10ee93a04fc3f517b71619e8ea26d853) +# DO NOT EDIT (digest: 57b376d9a9ea46b70acab11aab5f012c) Xenvm_interface Xenvm_client Log @@ -7,4 +7,5 @@ Result Errors ResizeRequest ResizeResponse 
+Pidfile # OASIS_STOP From 4c98a5591f05e178e8181411190ee097bc3366e9 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 17:07:43 +0100 Subject: [PATCH 09/11] xenvmd: on a FromLVM state transition Suspended -> Running, resend free blocks We missed one case where this could happen: when initially registering a host where the local allocator has been spawned previously. Signed-off-by: David Scott --- xenvmd/xenvmd.ml | 43 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 8 deletions(-) diff --git a/xenvmd/xenvmd.ml b/xenvmd/xenvmd.ml index 9ce3068..83725a7 100644 --- a/xenvmd/xenvmd.ml +++ b/xenvmd/xenvmd.ml @@ -104,13 +104,19 @@ module FromLVM = struct module R = Shared_block.Ring.Make(Log)(Vg_IO.Volume)(FreeAllocation) let create ~disk () = fatal_error "FromLVM.create" (R.Producer.create ~disk ()) - let rec attach ~name ~disk () = R.Producer.attach ~queue:(name ^ " FromLVM Producer") ~client:"xenvmd" ~disk () >>= function - | `Error `Suspended -> - debug "FromLVM.attach got `Suspended; sleeping"; - Lwt_unix.sleep 5. - >>= fun () -> - attach ~name ~disk () - | x -> fatal_error "FromLVM.attach" (return x) + let attach ~name ~disk () = + let initial_state = ref `Running in + let rec loop () = R.Producer.attach ~queue:(name ^ " FromLVM Producer") ~client:"xenvmd" ~disk () >>= function + | `Error `Suspended -> + debug "FromLVM.attach got `Suspended; sleeping"; + Lwt_unix.sleep 5. + >>= fun () -> + initial_state := `Suspended; + loop () + | x -> fatal_error "FromLVM.attach" (return x) in + loop () + >>= fun x -> + return (!initial_state, x) let state t = fatal_error "FromLVM.state" (R.Producer.state t) let rec push t item = R.Producer.push ~t ~item () >>= function | `Error (`Msg x) -> fatal_error_t (Printf.sprintf "Error pushing to the FromLVM queue: %s" x) @@ -271,7 +277,25 @@ module VolumeManager = struct | `Error _ -> fail (Failure (Printf.sprintf "Failed to open %s" fromLVM)) | `Ok disk -> FromLVM.attach ~name ~disk () - >>= fun from_LVM -> + >>= fun (initial_state, from_LVM) -> + ( if initial_state = `Suspended then begin + debug "The FromLVM queue was already suspended: resending the free blocks"; + ( match Vg_IO.find vg freeLVM with + | Some lv -> return lv + | None -> assert false ) >>= fun lv -> + let allocation = Lvm.Lv.to_allocation (Vg_IO.Volume.metadata_of lv) in + FromLVM.push from_LVM allocation + >>= fun pos -> + FromLVM.advance from_LVM pos + >>= fun () -> + debug "Free blocks pushed"; + return () + end else begin + debug "The FromLVM queue was running: no need to resend the free blocks"; + return () + end ) + >>= fun () -> + debug "querying state"; FromLVM.state from_LVM >>= fun state -> debug "FromLVM queue is currently %s" (match state with `Running -> "Running" | `Suspended -> "Suspended"); @@ -290,12 +314,15 @@ module VolumeManager = struct let to_lvm = List.assoc name !to_LVMs in ToLVM.pop to_lvm >>= fun (pos, items) -> + debug "FromLVM queue %s has %d items" name (List.length items); Lwt_list.iter_s (function { ExpandVolume.volume; segments } -> write (fun vg -> + debug "Expanding volume %s" volume; let id = (Lvm.Vg.LVs.find_by_name volume vg.Lvm.Vg.lvs).Lvm.Lv.id in Lvm.Vg.do_op vg (Lvm.Redo.Op.(LvExpand(id, { lvex_segments = segments }))) ) >>= fun () -> write (fun vg -> + debug "Removing free blocks from %s free LV" name; let (_,freeid) = (List.assoc name !free_LVs) in Lvm.Vg.do_op vg (Lvm.Redo.Op.(LvCrop(freeid, { lvc_segments = segments }))) ) From df6d22dfaf9022f8f602158c3b7361d17a799b4b Mon Sep 17 00:00:00 2001 From: David 
Scott Date: Thu, 7 May 2015 20:45:29 +0100 Subject: [PATCH 10/11] local allocator: allocate incrementally Previously we would try to allocate everything we needed in one chunk. If the free space isn't available we would block. However xenvmd does not know we need more space and won't give us more, so we deadlock. Instead we allocate as much as we can (up to the amount we really need), expand the volume, thus draining the free pool, which triggers xenvmd to refill it. Fix #69 Signed-off-by: David Scott --- xenvm-local-allocator/local_allocator.ml | 29 +++++++++++++++++++----- 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/xenvm-local-allocator/local_allocator.ml b/xenvm-local-allocator/local_allocator.ml index d4de29b..bf550ff 100644 --- a/xenvm-local-allocator/local_allocator.ml +++ b/xenvm-local-allocator/local_allocator.ml @@ -151,6 +151,8 @@ module FreePool = struct return () ) + (* Allocate up to [nr_extents]. Blocks if there is no space free. Can return + a partial allocation. *) let remove nr_extents = Lwt_mutex.with_lock m (fun () -> @@ -161,10 +163,17 @@ module FreePool = struct | `Ok x -> free := Lvm.Pv.Allocator.sub !free x; return x - | _ -> + | `Error (`OnlyThisMuchFree 0L) -> Lwt_condition.wait ~mutex:m c >>= fun () -> - wait () in + wait () + | `Error (`OnlyThisMuchFree n) -> + begin match Lvm.Pv.Allocator.find !free n with + | `Ok x -> + free := Lvm.Pv.Allocator.sub !free x; + return x + | _ -> assert false + end in wait () ) @@ -392,7 +401,8 @@ let main config daemon socket journal fromLVM toLVM = fun { ResizeRequest.local_dm_name = device; action } -> Lwt_mutex.with_lock m (fun () -> - ( match Devmapper.stat device with + (* We may need to enlarge in multiple chunks if the free pool is depleted *) + let rec expand action = match Devmapper.stat device with | None -> (* Log this kind of error. This tapdisk may block but at least others will keep going *) @@ -414,6 +424,8 @@ let main config daemon socket journal fromLVM toLVM = end else begin FreePool.remove nr_extents >>= fun extents -> + (* This may have allocated short *) + let nr_extents' = Lvm.Pv.Allocator.size extents in let segments, targets = extend_volume vg_device metadata data_volume extents in let _, volume = Mapper.vg_lv_of_name device in let volume = { ExpandVolume.volume; segments } in @@ -424,9 +436,14 @@ let main config daemon socket journal fromLVM toLVM = wait () (* The operation is now complete *) >>= fun () -> - return ResizeResponse.Success - end - ) + let action = match action with + | `Absolute x -> `Absolute x + | `IncreaseBy x -> `IncreaseBy Int64.(sub x (mul nr_extents' (mul sector_size extent_size))) in + if nr_extents = nr_extents' + then return ResizeResponse.Success + else expand action + end in + expand action ) in let ls = Devmapper.ls () in From 33a0273ccff389cdedba6de8deb784347d163eb5 Mon Sep 17 00:00:00 2001 From: David Scott Date: Thu, 7 May 2015 20:47:11 +0100 Subject: [PATCH 11/11] local allocator: new segments and targets should be stored in ascending order In particular device mapper likes its targets to be posted in order of virtual address. 
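
For illustration only (a simplified sketch with hypothetical values, not the real Lvm/Devmapper records): folding left over the allocated extents while consing each new segment and target onto the accumulator leaves the lists in descending order of virtual start, which is why a final List.rev is needed before the targets are handed to device mapper.

(* Simplified sketch: each "target" is represented just by its virtual start. *)
let extents = [ (0L, 16L); (16L, 16L); (32L, 16L) ]   (* (physical start, length) *)

let _next_start, targets_rev =
  List.fold_left
    (fun (next, acc) (_, len) ->
      (* each step conses the newest target onto the front, so acc ends up
         in descending order of virtual start *)
      (Int64.add next len, next :: acc))
    (0L, []) extents

let targets = List.rev targets_rev
(* targets = [0L; 16L; 32L]: ascending virtual addresses, the order device mapper expects *)
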
Signed-off-by: David Scott --- xenvm-local-allocator/local_allocator.ml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xenvm-local-allocator/local_allocator.ml b/xenvm-local-allocator/local_allocator.ml index bf550ff..ebad27c 100644 --- a/xenvm-local-allocator/local_allocator.ml +++ b/xenvm-local-allocator/local_allocator.ml @@ -278,7 +278,7 @@ let extend_volume device vg lv extents = (Lvm.Pv.Name.to_string pvname); next_sector, segments, targets ) (next_sector, [], []) extents in - segments, targets + List.rev segments, List.rev targets let stat x = match Devmapper.stat x with
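
As a standalone sketch of the rounding rule described in PATCH 01 (the function and parameter names here are illustrative, not taken from the code): converting a requested byte count into whole extents is the usual ceiling division, so even a 1-byte request consumes a full extent.

(* bytes_per_extent = sector_size * extent_size; both assumed positive *)
let extents_needed ~bytes_per_extent bytes =
  Int64.(div (add bytes (sub bytes_per_extent 1L)) bytes_per_extent)

(* e.g. with 4 MiB extents:
   extents_needed ~bytes_per_extent:4194304L 1L       = 1L
   extents_needed ~bytes_per_extent:4194304L 4194304L = 1L
   extents_needed ~bytes_per_extent:4194304L 4194305L = 2L *)
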