Merge pull request #125 from gobindadas/single-node-fix
Added cli support for singlenode deployment
gobindadas committed Dec 14, 2020
2 parents 5cf5f73 + b3a49ec commit 2b61849
Showing 2 changed files with 167 additions and 1 deletion.
7 changes: 6 additions & 1 deletion playbooks/hc-ansible-deployment/README
@@ -21,6 +21,11 @@ How to run:
For gluster deployment clean up:
# ansible-playbook -i gluster_inventory.yml tasks/gluster_cleanup.yml

For single node HC deployment, there is a separate inventory file called "single_node_gluster_inventory.yml".
How to run:
# cd hc-ansible-deployment
# ansible-playbook -i single_node_gluster_inventory.yml hc_deployment.yml --extra-vars='@he_gluster_vars.json'

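The '@he_gluster_vars.json' extra-vars file holds the hosted-engine and storage-domain answers consumed by the playbook. As a rough, illustrative sketch only (the key names below are assumptions; treat the he_gluster_vars.json template shipped in this directory as the authoritative reference), the file is plain JSON along these lines:

{
  "he_appliance_password": "<appliance-password>",
  "he_admin_password": "<admin-portal-password>",
  "he_domain_type": "glusterfs",
  "he_fqdn": "<hosted-engine-VM-FQDN>",
  "he_storage_domain_name": "HostedEngine",
  "he_storage_domain_path": "/engine",
  "he_storage_domain_addr": "<host1-backend-network-FQDN>",
  "he_bridge_if": "<management-network-interface>"
}
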
ovirt_repo_release_rpm value should be the particular release repo if you are installing the role from ansible-galaxy

-> The below variables are required only when the user wants to install pkgs using subscription-manager.
@@ -43,7 +48,7 @@ ovirt_repo_release_rpm value should be particular release repo if you are instal
2. Create a backup copy of the template inventory file
3. Edit the inventory file with the right set of values
4. Encrypt the inventory with ansible-vault # ansible-vault encrypt luks_tang_inventory.yml
5. Run the playbook to configure NBDE with LUKS device and tang server:
# cd hc-ansible-deployment
# ansible-playbook -i luks_tang_inventory.yml tasks/luks_tang_setup.yml --tags luksencrypt,bindtang --ask-vault-pass
6. If blacklisting the devices is also required, include 'blacklistdevices' along with the tags, for example:
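(a possible invocation, combining the tags from step 5 with 'blacklistdevices')
# ansible-playbook -i luks_tang_inventory.yml tasks/luks_tang_setup.yml --tags blacklistdevices,luksencrypt,bindtang --ask-vault-pass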
161 changes: 161 additions & 0 deletions playbooks/hc-ansible-deployment/single_node_gluster_inventory.yml
@@ -0,0 +1,161 @@
hc_nodes:
hosts:
# Host1
<host1-backend-network-FQDN>:

# Blacklist multipath devices which are used for gluster bricks
# If you omit blacklist_mpath_devices, all devices will be whitelisted.
# If the disks are not blacklisted, it is assumed that a multipath configuration
# exists on the server, and /dev/mapper/<WWID> should be provided instead of /dev/sdx
blacklist_mpath_devices:
- sdb
- sdc

# Enable this section 'gluster_infra_vdo' if dedupe & compression is
# required on that storage volume.
# The variables refer to:
# name - VDO volume name to be used
# device - Disk on which the VDO volume is to be created
# logicalsize - Logical size of the VDO volume. This value is 10 times
# the size of the physical disk
# emulate512 - the VDO device is made as a 4KB block sized storage volume (4KN)
# slabsize - VDO slab size. If the VDO logical size is >= 1000G then
# slabsize is 32G, else slabsize is 2G
#
# Following VDO values are as per recommendation and treated as constants:
# blockmapcachesize - 128M
# writepolicy - auto
#
# gluster_infra_vdo:
# - { name: 'vdo_sdc', device: '/dev/sdc', logicalsize: '5000G', emulate512: 'off', slabsize: '32G',
# blockmapcachesize: '128M', writepolicy: 'auto' }
# - { name: 'vdo_sdd', device: '/dev/sdd', logicalsize: '3000G', emulate512: 'off', slabsize: '32G',
# blockmapcachesize: '128M', writepolicy: 'auto' }

# When dedupe and compression are enabled on the device,
# use pvname for that device as '/dev/mapper/<vdo_device_name>'
#
# The variables refer to:
# vgname - VG to be created on the disk
# pvname - Physical disk (/dev/sdc) or VDO volume (/dev/mapper/vdo_sdc)
gluster_infra_volume_groups:
- vgname: gluster_vg_sdb
pvname: /dev/sdb
- vgname: gluster_vg_sdc
pvname: /dev/mapper/vdo_sdc
- vgname: gluster_vg_sdd
pvname: /dev/mapper/vdo_sdd

gluster_infra_mount_devices:
- path: /gluster_bricks/engine
lvname: gluster_lv_engine
vgname: gluster_vg_sdb
- path: /gluster_bricks/data
lvname: gluster_lv_data
vgname: gluster_vg_sdc
- path: /gluster_bricks/vmstore
lvname: gluster_lv_vmstore
vgname: gluster_vg_sdd

# 'thinpoolsize' is the sum of the sizes of all LVs to be created on that VG.
# When VDO is enabled, 'thinpoolsize' is 10 times the sum of the sizes
# of all LVs to be created on that VG. The recommended value for
# 'poolmetadatasize' is 16GB, and it should be considered exclusive of
# 'thinpoolsize' (an illustrative calculation follows the definitions below).
gluster_infra_thinpools:
- {vgname: 'gluster_vg_sdc', thinpoolname: 'gluster_thinpool_sdc', thinpoolsize: '500G', poolmetadatasize: '16G'}
- {vgname: 'gluster_vg_sdd', thinpoolname: 'gluster_thinpool_sdd', thinpoolsize: '500G', poolmetadatasize: '16G'}
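# Illustrative calculation only (hypothetical LV sizes, not the values used in this
# template): for two thin LVs of 200G and 300G on one VG, 'thinpoolsize' would be
# 500G without VDO, or 5000G (10 x 500G) with VDO enabled; the 16G
# 'poolmetadatasize' is allocated in addition to that.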

# Enable the following section if LVM cache is to be enabled
# Following are the variables:
# vgname - VG with the slow HDD device that needs caching
# cachedisk - Comma-separated value of the slow HDD and the fast SSD
# In this example, /dev/sdb is the slow HDD, /dev/sde is the fast SSD
# cachelvname - LV cache name
# cachethinpoolname - Thinpool to which the fast SSD is to be attached
# cachelvsize - Size of the cache data LV. This is SSD_size - (1/1000) of SSD_size;
# 1/1000th of the SSD space will be used by the cache LV metadata
# (an illustrative calculation follows the commented example below)
# cachemode - writethrough or writeback
# gluster_infra_cache_vars:
# - vgname: gluster_vg_sdb
# cachedisk: /dev/sdb,/dev/sde
# cachelvname: cachelv_thinpool_sdb
# cachethinpoolname: gluster_thinpool_sdb
# cachelvsize: '250G'
# cachemode: writethrough
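# Illustrative cachelvsize calculation (hypothetical 1000G SSD, not tied to the
# commented example above): cachelvsize = 1000G - (1000G / 1000) = 999G, leaving
# about 1G of the SSD for the cache LV metadata.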

# Only the engine brick needs to be thickly provisioned
# Engine brick requires 100GB of disk space
gluster_infra_thick_lvs:
- vgname: gluster_vg_sdb
lvname: gluster_lv_engine
size: 100G

gluster_infra_lv_logicalvols:
- vgname: gluster_vg_sdc
thinpool: gluster_thinpool_sdc
lvname: gluster_lv_data
lvsize: 200G
- vgname: gluster_vg_sdd
thinpool: gluster_thinpool_sdd
lvname: gluster_lv_vmstore
lvsize: 200G

# Common configurations
vars:
# In case of an IPv6 based deployment, "gluster_features_enable_ipv6" needs to be enabled; the below line needs to be uncommented, like:
# gluster_features_enable_ipv6: true

cluster_nodes:
- <host1-backend-network-fqdn>
gluster_features_hci_volume_options:
{ group: 'distributed-virt',
storage.owner-uid: '36',
storage.owner-gid: '36',
performance.strict-o-direct: 'on',
network.remote-dio: 'off',
network.ping-timeout: '20',
}

gluster_features_hci_cluster: "{{ cluster_nodes }}"
gluster_features_hci_volumes:
- volname: engine
brick: /gluster_bricks/engine/engine
- volname: data
brick: /gluster_bricks/data/data
- volname: vmstore
brick: /gluster_bricks/vmstore/vmstore

# Firewall setup
gluster_infra_fw_ports:
- 2049/tcp
- 54321/tcp
- 5900-6923/tcp
- 16514/tcp
- 5666/tcp
gluster_infra_fw_permanent: true
gluster_infra_fw_state: enabled
gluster_infra_fw_zone: public
gluster_infra_fw_services:
- glusterfs
# Allowed values for 'gluster_infra_disktype' - RAID6, RAID5, JBOD
gluster_infra_disktype: JBOD

# 'gluster_infra_diskcount' is the number of data disks in the RAID set.
# Note: for JBOD it is 1
gluster_infra_diskcount: 1

gluster_infra_stripe_unit_size: 256
gluster_features_force_varlogsizecheck: false
gluster_set_selinux_labels: true

## Auto add storage domain vars
gluster:
hosts:
<host1-frontend-network-fqdn>:

vars:
storage_domains:
- {"name":"data","host":"host1-frontend-network-fqdn","address":"host1-backend-network-fqdn","path":"/data","function":"data","mount_options":""}
- {"name":"vmstore","host":"host1-frontend-network-fqdn","address":"host1-backend-network-fqdn","path":"/vmstore","function":"data","mount_options":""}
