Commit

Merge 461c348 into 86104f3
imobachgs committed Oct 10, 2018
2 parents 86104f3 + 461c348 commit ffbad99
Showing 26 changed files with 1,085 additions and 220 deletions.
21 changes: 14 additions & 7 deletions src/lib/y2storage/autoinst_profile/drive_section.rb
@@ -49,7 +49,8 @@ def self.attributes
{ name: :pesize },
{ name: :type },
{ name: :use },
- { name: :skip_list }
+ { name: :skip_list },
+ { name: :raid_options }
]
end

@@ -98,6 +99,10 @@ def self.attributes
# @!attribute skip_list
# @return [Array<SkipListSection>] collection of <skip_list> entries

+ # @!attribute raid_options
+ # @return [RaidOptionsSection] RAID options
+ # @see RaidOptionsSection

# Constructor
#
# @param parent [#parent,#section_name] parent section
@@ -119,10 +124,13 @@ def init_from_hashes(hash)
@use = use_value_from_string(hash["use"]) if hash["use"]
@partitions = partitions_from_hash(hash)
@skip_list = SkipListSection.new_from_hashes(hash.fetch("skip_list", []), self)
if hash["raid_options"]
@raid_options = RaidOptionsSection.new_from_hashes(hash["raid_options"], self)
end
end
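For context, a minimal sketch of how this new branch gets exercised. The hash mirrors a parsed AutoYaST <drive> section; the raid_options keys shown are illustrative assumptions, not taken from this diff:

    # Hypothetical parsed profile data for a RAID drive section
    hash = {
      "device"       => "/dev/md0",
      "use"          => "all",
      "raid_options" => { "raid_type" => "raid1", "chunk_size" => "4" }
    }
    section = Y2Storage::AutoinstProfile::DriveSection.new_from_hashes(hash)
    section.raid_options # => RaidOptionsSection built from the nested hash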

def default_type_for(hash)
return :CT_MD if hash["device"] == "/dev/md"
return :CT_MD if hash["device"].to_s.start_with?("/dev/md")
:CT_DISK
end
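The relaxed check now recognizes named MD devices, not just the legacy /dev/md pseudo-device. A quick sketch of the behavior after this change:

    default_type_for("device" => "/dev/md")   # => :CT_MD (as before)
    default_type_for("device" => "/dev/md0")  # => :CT_MD (newly recognized)
    default_type_for("device" => "/dev/sda")  # => :CT_DISK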

@@ -166,10 +174,8 @@ def init_from_device(device)
#
# @return [String] MD RAID device name
def name_for_md
- # TODO: a proper profile will always include one partition for each MD
- # drive, but as soon as we introduce error handling and reporting we
- # should do something if #partitions is empty (wrong profile).
- partitions.first.name_for_md
+ return partitions.first.name_for_md if device == "/dev/md"
+ device
end
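In other words, the legacy pseudo-device still derives the RAID name from its first <partition> entry, while a real device name is now returned as-is. A sketch of both paths, assuming the usual <partition_nr> element in the profile:

    # Legacy profile: device="/dev/md" plus a partition with partition_nr 0
    section.name_for_md # => "/dev/md0" (delegated to partitions.first)
    # New style: device="/dev/md0"
    section.name_for_md # => "/dev/md0" (taken from #device directly)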

# Content of the section in the format used by the AutoYaST modules
@@ -279,9 +285,10 @@ def init_from_vg(vg)
# @return [Boolean]
def init_from_md(md)
@type = :CT_MD
@device = "/dev/md"
@device = md.name
@partitions = partitions_from_collection([md])
@enable_snapshots = enabled_snapshots?([md.filesystem])
@raid_options = RaidOptionsSection.new_from_storage(md)
true
end

16 changes: 15 additions & 1 deletion src/lib/y2storage/planned/devices_collection.rb
@@ -85,10 +85,24 @@ def append(devices)
#
# @return [Array<Planned::Partition>]
def partitions
- @partitions ||= devices.select { |d| d.is_a?(Planned::Partition) } +
+ @partitions ||= disk_partitions + md_partitions
+ end

+ # Returns the list of planned partitions for disk devices
+ #
+ # @return [Array<Planned::Partition>]
+ def disk_partitions
+ @disk_partitions ||= devices.select { |d| d.is_a?(Planned::Partition) } +
disks.map(&:partitions).flatten
end

+ # Returns the list of planned partitions for software RAID devices
+ #
+ # @return [Array<Planned::Partition>]
+ def md_partitions
+ @md_partitions ||= mds.map(&:partitions).flatten
+ end
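A rough usage sketch of the new query methods; the constructor call is an assumption about how the collection is built elsewhere:

    collection = Y2Storage::Planned::DevicesCollection.new(planned_devices)
    collection.disk_partitions # planned partitions that live on disks
    collection.md_partitions   # planned partitions that live on MD RAIDs
    collection.partitions      # union of both groups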

# Returns the list of planned disks
#
# @return [Array<Planned::Disk>]
14 changes: 14 additions & 0 deletions src/lib/y2storage/planned/has_size.rb
@@ -42,6 +42,10 @@ module HasSize
# @return [DiskSize] maximum acceptable size
attr_accessor :max_size

+ # @return [Integer] percentage of the container's size to be used by this volume (used
+ # when the container's size cannot be determined in advance)
+ attr_accessor :percent_size

# @return [Float] factor used to distribute the extra space
# between devices
attr_accessor :weight
@@ -59,10 +63,20 @@ def min_size
(respond_to?(:reuse_name) && reuse_name) ? DiskSize.zero : @min_size
end

+ # Determines the size this device should take within a container device
+ #
+ # @param container [BlkDevice] Container device
+ # @return [DiskSize]
+ def size_in(container)
+ (container.size * percent_size / 100).floor(container.block_size)
+ end

alias_method :min, :min_size
alias_method :max, :max_size
+ alias_method :percent, :percent_size
alias_method :min=, :min_size=
alias_method :max=, :max_size=
+ alias_method :percent=, :percent_size=

# Class methods for the mixin
module ClassMethods
4 changes: 0 additions & 4 deletions src/lib/y2storage/planned/lvm_lv.rb
@@ -41,10 +41,6 @@ class LvmLv < Device
# @return [String] name to use for Y2Storage::LvmLv#lv_name
attr_accessor :logical_volume_name

- # @return [Integer] percentage of the volume group size to be used for
- # this LV
- attr_accessor :percent_size

# @return [LvType] Logical volume type
attr_accessor :lv_type

7 changes: 7 additions & 0 deletions src/lib/y2storage/planned/md.rb
@@ -56,6 +56,12 @@ class Md < Device
# @return [Array<String>] sorted list of device names
attr_accessor :devices_order

+ # @return [Y2Storage::PartitionTables::Type] Partition table type
+ attr_accessor :ptable_type

+ # @return [Array<Planned::Partition>] List of planned partitions
+ attr_accessor :partitions

# Constructor.
#
def initialize(name: nil)
@@ -65,6 +71,7 @@ def initialize(name: nil)
initialize_can_be_encrypted
initialize_can_be_pv
@name = name
+ @partitions = []
end

# Adds the provided block devices to the existing MD array
7 changes: 7 additions & 0 deletions src/lib/y2storage/planned/partition.rb
@@ -78,6 +78,13 @@ def initialize(mount_point, filesystem_type = nil)
@primary = false
end

+ # Determines whether the partition is used as part of a VG or an MD
+ #
+ # @return [Boolean]
+ def component?
+ lvm_pv? || md_member?
+ end

def self.to_string_attrs
[
:mount_point, :reuse_name, :reuse_sid, :min_size, :max_size,
1 change: 1 addition & 0 deletions src/lib/y2storage/proposal.rb
@@ -42,3 +42,4 @@ module Proposal
require "y2storage/proposal/devices_planner"
require "y2storage/proposal/space_maker"
require "y2storage/proposal/initial_strategies"
require "y2storage/proposal/partition_table_creator"
76 changes: 62 additions & 14 deletions src/lib/y2storage/proposal/autoinst_devices_creator.rb
@@ -138,7 +138,7 @@ def best_distribution(planned_partitions, disk_names)
#
# @return [Array<Array<Planned::Partition>, Array<Planned::Partition>, CreatorResult>]
def process_partitions(planned_devices, disk_names)
- planned_partitions = planned_devices.partitions
+ planned_partitions = sized_partitions(planned_devices.disk_partitions)
parts_to_reuse, parts_to_create = planned_partitions.partition(&:reuse?)
creator_result = create_partitions(parts_to_create, disk_names)
reuse_devices(parts_to_reuse, creator_result.devicegraph)
@@ -156,8 +156,8 @@ def process_partitions(planned_devices, disk_names)
def process_mds(planned_devices, devs_to_reuse, creator_result)
mds_to_reuse, mds_to_create = planned_devices.mds.partition(&:reuse?)
devs_to_reuse_in_md = reusable_by_md(devs_to_reuse)
- creator_result.merge!(create_mds(mds_to_create, creator_result, devs_to_reuse_in_md))
- mds_to_reuse.each { |i| i.reuse!(creator_result.devicegraph) }
+ creator_result.merge!(create_mds(planned_devices.mds, creator_result, devs_to_reuse_in_md))
+ reuse_mds(mds_to_reuse, creator_result)

[mds_to_create, mds_to_reuse, creator_result]
end
@@ -173,7 +173,7 @@ def process_vgs(planned_devices, devs_to_reuse, creator_result)
planned_vgs = planned_devices.vgs
creator_result.merge!(set_up_lvm(planned_vgs, creator_result, devs_to_reuse))
vgs_to_reuse = planned_vgs.select(&:reuse?)
- reuse_vgs(vgs_to_reuse, creator_result.devicegraph)
+ creator_result = reuse_vgs(vgs_to_reuse, creator_result)

[planned_vgs, creator_result]
end
@@ -245,7 +245,7 @@ def create_logical_volumes(devicegraph, vg, pvs)
lvm_creator.create_volumes(new_vg, pvs)
end

- # Reuses partitions or logical volumes for the given devicegraph
+ # Reuses partitions for the given devicegraph
#
# Shrinking partitions/logical volumes should be processed first in order to free
# some space for growing ones.
@@ -260,11 +260,25 @@ def reuse_devices(reused_devices, devicegraph)
# Reuses volume groups for the given devicegraph
#
# @param reused_vgs [Array<Planned::LvmVg>] Volume groups to reuse
- # @param devicegraph [Devicegraph] Devicegraph to reuse partitions
- def reuse_vgs(reused_vgs, devicegraph)
- reused_vgs.each do |vg|
- vg.reuse!(devicegraph)
- reuse_devices(vg.all_lvs.select(&:reuse?), devicegraph)
+ # @param previous_result [Proposal::CreatorResult] Result containing the devicegraph
+ # to work on
+ def reuse_vgs(reused_vgs, previous_result)
+ reused_vgs.each_with_object(previous_result) do |vg, result|
+ lvm_creator = Proposal::LvmCreator.new(result.devicegraph)
+ result.merge!(lvm_creator.reuse_volumes(vg))
end
end

+ # Reuses MD RAIDs for the given devicegraph
+ #
+ # @param reused_mds [Array<Planned::Md>] MD RAIDs to reuse
+ # @param previous_result [Proposal::CreatorResult] Starting point
+ # to work on
+ # @return [Proposal::CreatorResult] Result containing the reused MD RAID devices
+ def reuse_mds(reused_mds, previous_result)
+ reused_mds.each_with_object(previous_result) do |md, result|
+ md_creator = Proposal::MdCreator.new(result.devicegraph)
+ result.merge!(md_creator.reuse_partitions(md))
+ end
+ end

@@ -273,17 +287,34 @@ def reuse_vgs(reused_vgs, devicegraph)
# @param mds [Array<Planned::Md>] List of planned MD arrays to create
# @param previous_result [Proposal::CreatorResult] Starting point
# @param devs_to_reuse [Array<Planned::Partition, Planned::StrayBlkDevice>] List of devices
- # to reuse.
+ # to reuse
# @return [Proposal::CreatorResult] Result containing the specified MD RAIDs
def create_mds(mds, previous_result, devs_to_reuse)
mds.reduce(previous_result) do |result, md|
- md_creator = Proposal::MdCreator.new(result.devicegraph)
- devices = previous_result.created_names { |d| d.raid_name == md.name }
+ devices = result.created_names { |d| d.raid_name == md.name }
devices += devs_to_reuse.select { |d| d.raid_name == md.name }.map(&:reuse_name)
- result.merge(md_creator.create_md(md, devices))
+ result.merge(create_md(result.devicegraph, md, devices))
end
end

+ # Creates an MD RAID
+ #
+ # @param devicegraph [Devicegraph] Starting devicegraph
+ # @param md [Planned::Md] Planned MD array to create
+ # @param devices [Array<Planned::Device>] List of devices to include in the RAID
+ # @return [Proposal::CreatorResult] Result containing the specified RAID
+ #
+ # @raise NoDiskSpaceError
+ def create_md(devicegraph, md, devices)
+ md_creator = Proposal::MdCreator.new(devicegraph)
+ md_creator.create_md(md, devices)
+ rescue NoDiskSpaceError
+ md_creator = Proposal::MdCreator.new(devicegraph)
+ new_md = md.clone
+ new_md.partitions = flexible_devices(md.partitions)
+ md_creator.create_md(new_md, devices)
+ end
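The rescue clause retries once with relaxed size limits. A hypothetical illustration of what flexible_devices (whose body is collapsed further down in this file) does to each planned partition:

    # Assumed behavior, per the doc comment below: the hard minimum is
    # dropped and replaced by a weight proportional to the original size.
    part = Y2Storage::Planned::Partition.new("/home")
    part.min = part.max = Y2Storage::DiskSize.GiB(10)
    flexible = flexible_devices([part]).first
    flexible.min    # => DiskSize.zero: the hard lower bound is removed
    flexible.weight # => proportional weight replacing the fixed size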

# Returns a new list of planned devices with flexible limits
#
# The min_size is removed and a proportional weight is set for every device.
@@ -305,6 +336,23 @@ def flexible_devices(devices)
def reusable_by_md(planned_devices)
planned_devices.select { |d| d.respond_to?(:raid_name) }
end

+ # Returns a list of planned partitions with adjusted sizes
+ #
+ # All partitions whose sizes are specified as a percentage get their minimum
+ # and maximum sizes adjusted.
+ #
+ # @param planned_partitions [Array<Planned::Partition>] List of planned partitions
+ # @return [Array<Planned::Partition>] New list of planned partitions with adjusted sizes
+ def sized_partitions(planned_partitions)
+ planned_partitions.map do |part|
+ new_part = part.clone
+ next new_part unless new_part.percent_size
+ disk = original_graph.find_by_name(part.disk)
+ new_part.max = new_part.min = new_part.size_in(disk)
+ new_part
+ end
+ end
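Putting it together with HasSize#size_in from above: a percent-sized partition is pinned to a concrete size before being handed to the partition creator. A sketch with assumed values:

    part = Y2Storage::Planned::Partition.new("/data")
    part.disk = "/dev/sda"   # assume a 100 GiB disk in original_graph
    part.percent_size = 40
    sized = sized_partitions([part]).first
    sized.min # => 40 GiB (block-aligned); sized.max is the same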
end
end
end
