#!/bin/bash
#
# lib/ceph
# Functions to control the configuration
# and operation of the **Ceph** storage service
# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
# ``stack.sh`` calls the entry points in this order:
#
# - install_ceph
# - configure_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph
# - cleanup_containerized_ceph
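#
# A minimal ``local.conf`` sketch that exercises these entry points might
# look like the following (illustrative only; enable whichever backends
# you need):
#
#   enable_plugin devstack-plugin-ceph https://opendev.org/openstack/devstack-plugin-ceph
#   ENABLE_CEPH_GLANCE=True
#   ENABLE_CEPH_CINDER=True
#   ENABLE_CEPH_NOVA=True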
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace
# Defaults
# --------
TEST_MASTER=$(trueorfalse False TEST_MASTER)
CEPH_RELEASE=${CEPH_RELEASE:-pacific}
GANESHA_RELEASE=${GANESHA_RELEASE:-'unspecified'}
# Remove "v" and "-stable" prefix/suffix tags
GANESHA_RELEASE=$(echo $GANESHA_RELEASE | sed -e "s/^v//" -e "s/-stable$//")
if [[ "$MANILA_CEPH_DRIVER" == "cephfsnfs" && "$GANESHA_RELEASE" == "unspecified" ]]; then
# default ganesha release based on ceph release
case $CEPH_RELEASE in
pacific)
GANESHA_RELEASE='3.5' ;;
*)
GANESHA_RELEASE='4.0' ;;
esac
fi
# Deploy a Ceph demo container instead of a non-containerized version
CEPH_CONTAINERIZED=$(trueorfalse False CEPH_CONTAINERIZED)
# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DISK_IMAGE:-${CEPH_DATA_DIR}/drives/images/ceph.img}
# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size
# (a size with a unit suffix, e.g. ``8GB``).
VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-8GB}
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=${CEPH_LOOPBACK_DISK_SIZE_DEFAULT:-$VOLUME_BACKING_FILE_SIZE}
CEPH_LOOPBACK_DISK_SIZE=\
${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
MDS_ID=${MDS_ID:-a}
MGR_ID=${MGR_ID:-x}
# RBD configuration defaults
if [[ ${DISTRO} =~ (bionic|xenial) ]]; then
CEPH_RBD_DEFAULT_FEATURES=${CEPH_RBD_DEFAULT_FEATURES:-"layering, exclusive-lock"}
else
CEPH_RBD_DEFAULT_FEATURES=${CEPH_RBD_DEFAULT_FEATURES:-"layering, exclusive-lock, object-map, fast-diff"}
fi
# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
GLANCE_RGW_BACKEND=${GLANCE_RGW_BACKEND:-False}
# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
# Manila
CEPHFS_POOL_PG=${CEPHFS_POOL_PG:-8}
# Multiple filesystems enable more than one devstack to share
# the same REMOTE_CEPH cluster. Note that in addition to setting
# CEPHFS_MULTIPLE_FILESYSTEMS and REMOTE_CEPH, each devstack
# needs to set distinct values for CEPHFS_FILESYSTEM,
# CEPHFS_METADATA_POOL, and CEPHFS_DATA_POOL.
CEPHFS_MULTIPLE_FILESYSTEMS=${CEPHFS_MULTIPLE_FILESYSTEMS:-False}
CEPHFS_FILESYSTEM=${CEPHFS_FILESYSTEM:-cephfs}
CEPHFS_METADATA_POOL=${CEPHFS_METADATA_POOL:-cephfs_metadata}
CEPHFS_DATA_POOL=${CEPHFS_DATA_POOL:-cephfs_data}
MANILA_CEPH_DRIVER=${MANILA_CEPH_DRIVER:-cephfsnative}
MANILA_CEPH_USER=${MANILA_CEPH_USER:-manila}
# Allows driver to store NFS-Ganesha exports and export counter as
# RADOS objects in CephFS's data pool. This needs NFS-Ganesha v2.5.4 or later,
# Ceph v12.2.2 or later, and OpenStack Queens or later.
MANILA_CEPH_GANESHA_RADOS_STORE=${MANILA_CEPH_GANESHA_RADOS_STORE:-True}
# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
# configured for your Ceph cluster. By default we configure
# only one replica since this is far less CPU and memory intensive. If
# you plan to test Ceph replication, feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
# Rados gateway
CEPH_RGW_PORT=${CEPH_RGW_PORT:-8080}
CEPH_RGW_IDENTITY_API_VERSION=${CEPH_RGW_IDENTITY_API_VERSION:-3}
CEPH_RGW_KEYSTONE_SSL=$(trueorfalse False CEPH_RGW_KEYSTONE_SSL)
# iSCSI defaults
CEPH_ISCSI_TARGET_IQN=${CEPH_ISCSI_TARGET_IQN:-iqn.1993-08.org.opendev:01:a9aa4032d2c1}
CEPH_ISCSI_API_USER=${CEPH_ISCSI_API_USER:-openstack}
CEPH_ISCSI_API_PASSWORD=${CEPH_ISCSI_API_PASSWORD:-openstack}
CEPH_ISCSI_API_HOST=${CEPH_ISCSI_API_HOST:-$SERVICE_HOST}
CEPH_ISCSI_API_PORT=${CEPH_ISCSI_API_PORT:-5002}
CEPH_ISCSI_GATEWAY_CFG=${CEPH_CONF_DIR}/iscsi-gateway.cfg
CEPH_ISCSI_MINIMUM_GATEWAYS=${CEPH_ISCSI_MINIMUM_GATEWAYS:-1}
# gwcli requires a pool named rbd
CEPH_ISCSI_POOL="rbd"
CEPH_ISCSI_POOL_PG=${CEPH_ISCSI_POOL_PG:-8}
# Ceph REST API (for containerized version only)
# Default is 5000, but Keystone already listens on 5000
CEPH_REST_API_PORT=${CEPH_REST_API_PORT:-5001}
# Set minimum client version
CEPH_MIN_CLIENT_VERSION=${CEPH_MIN_CLIENT_VERSION}
# Connect to an existing Ceph cluster
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=\
${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
REMOTE_CEPH_RGW=$(trueorfalse False REMOTE_CEPH_RGW)
if [[ "$TARGET_BRANCH" =~ stable/(ocata|pike) ]]; then
# not supported before Queens
ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False
fi
# Set INIT_SYSTEM to upstart, systemd, or init. In our domain it should be
# safe to assume that if the init system is not upstart or systemd that it
# is sysvinit rather than other theoretical possibilities like busybox.
INIT_SYSTEM=$(init --version 2>/dev/null | grep -qs upstart && echo upstart \
|| cat /proc/1/comm)
# Functions
# ------------
# Containerized Ceph
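# deploy_containerized_ceph() - Install Docker and ceph-common, prepare the
# data directory, run the ceph/demo container on the host network and wait
# for the cluster to report HEALTH_OK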
function deploy_containerized_ceph {
install_package docker docker.io ceph-common
DOCKER_EXEC="docker exec ceph-demo"
initial_configure_ceph
sudo docker run -d \
--name ceph-demo \
--net=host \
-v ${CEPH_CONF_DIR}:${CEPH_CONF_DIR} \
-v ${CEPH_DATA_DIR}:${CEPH_DATA_DIR} \
-e MON_IP=${SERVICE_HOST} \
-e CEPH_PUBLIC_NETWORK=$(grep -o ${SERVICE_HOST%??}0/.. /proc/net/fib_trie | head -1) \
-e RGW_CIVETWEB_PORT=${CEPH_RGW_PORT} \
-e RESTAPI_PORT=${CEPH_REST_API_PORT} \
ceph/demo
# wait for ceph to be healthy then continue
ceph_status
}
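# wait_for_daemon() - Poll the command passed as the first argument once per
# second for up to 20 seconds; return 0 as soon as it succeeds, 1 on timeout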
function wait_for_daemon {
timeout=20
daemon_to_test=$1
while [ $timeout -ne 0 ]; do
if eval $daemon_to_test; then
return 0
fi
sleep 1
let timeout=timeout-1
done
return 1
}
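# ceph_status() - Wait until the ceph-demo container reports HEALTH_OK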
function ceph_status {
echo "Waiting for Ceph to be ready"
return $(wait_for_daemon "sudo docker exec ceph-demo ceph health | grep -sq HEALTH_OK")
}
# is_ceph_enabled_for_service() - checks whether the OpenStack service
# specified as an argument is enabled with Ceph as its storage backend.
function is_ceph_enabled_for_service {
local config config_name enabled service
enabled=1
service=$1
# Construct the global variable ENABLE_CEPH_.* corresponding to a
# $service.
config_name=ENABLE_CEPH_$(echo $service | \
tr '[:lower:]' '[:upper:]' | tr '-' '_')
config=$(eval echo "\$$config_name")
if (is_service_enabled $service) && [[ $config == 'True' ]]; then
enabled=0
fi
return $enabled
}
# _get_ceph_version() - checks version of Ceph mon daemon or CLI based on an
# argument. Checking mon daemon version requires the mon daemon to be up
# and healthy.
function _get_ceph_version {
local ceph_version_str
local mon_started
if [[ $1 == 'cli' ]]; then
# ceph --version shows the CLI version
ceph_version_str=$(sudo ceph --version | cut -d ' ' -f 3 | \
cut -d '.' -f 1,2)
elif [[ $1 == 'mon' ]]; then
# ceph version shows the mon daemon version
mon_started=$(wait_for_daemon "sudo systemctl is-active --quiet ceph-mon@$(hostname)")
if $mon_started; then
ceph_version_str=$(sudo ceph version | cut -d ' ' -f 3 | \
cut -f 1,2 -d '.')
else
die $LINENO "ceph-mon@$(hostname) is not running and it's not possible to \
retrieve its version"
fi
else
die $LINENO "Invalid argument. The _get_ceph_version function needs \
an argument that can be 'cli' or 'mon'."
fi
echo $ceph_version_str
}
# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
cat <<EOF | sudo tee secret.xml>/dev/null
<secret ephemeral='no' private='no'>
<uuid>${CINDER_CEPH_UUID}</uuid>
<usage type='ceph'>
<name>client.${CINDER_CEPH_USER} secret</name>
</usage>
</secret>
EOF
sudo virsh secret-define --file secret.xml
sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} \
--base64 $(sudo ceph -c ${CEPH_CONF_FILE} \
auth get-key client.${CINDER_CEPH_USER})
sudo rm -f secret.xml
}
# _undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function _undefine_virsh_secret {
if is_ceph_enabled_for_service cinder || \
is_ceph_enabled_for_service nova; then
local virsh_uuid
virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
sudo virsh secret-undefine ${virsh_uuid} &>/dev/null
fi
}
# check_os_support_ceph() - Check if the OS provides a decent version of Ceph
function check_os_support_ceph {
if [[ ! ${DISTRO} =~ (jammy|focal|bionic|xenial|f31|f32|f33|f34|rhel8|rhel9) ]]; then
echo "WARNING: devstack-plugin-ceph hasn't been tested with $DISTRO. \
Set FORCE_CEPH_INSTALL=yes in your local.conf if you'd like to \
attempt installation anyway."
if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
die $LINENO "Not proceeding with install."
fi
fi
if [[ ! $INIT_SYSTEM == 'systemd' ]]; then
die $LINENO "This plugin is currently only supported on systemd-enabled systems."
fi
}
# check_os_support_for_iscsi() - Make sure the kernel provides the modules required for Ceph iSCSI
function check_os_support_for_iscsi {
KERNEL_CONFIG="/boot/config-$(uname -r)"
target_core=$(grep -E '(CONFIG_TARGET_CORE=m|CONFIG_TARGET_CORE=y)' $KERNEL_CONFIG)
tcm_user=$(grep -E '(CONFIG_TCM_USER2=m|CONFIG_TCM_USER2=y)' $KERNEL_CONFIG)
iscsi_target=$(grep -E '(CONFIG_ISCSI_TARGET=m|CONFIG_ISCSI_TARGET=y)' $KERNEL_CONFIG)
if [ -z "$target_core" ] || [ -z "$tcm_user" ] || [ -z "$iscsi_target" ]; then
die $LINENO "Ceph iSCSI cannot work. The required kernel modules are not installed."
fi
}
# cleanup_ceph_remote() - Remove the pools and cephx users created on the
# remote Ceph cluster, plus the local disk image, so a clean run can start fresh
function cleanup_ceph_remote {
# do a proper cleanup from here to avoid leftover on the remote Ceph cluster
if is_ceph_enabled_for_service glance; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL \
--yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph -c ${CEPH_CONF_FILE} auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
fi
if is_ceph_enabled_for_service cinder; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL \
--yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph -c ${CEPH_CONF_FILE} auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
fi
if is_ceph_enabled_for_service c-bak; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL \
--yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph -c ${CEPH_CONF_FILE} auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
fi
if is_ceph_enabled_for_service nova; then
iniset $NOVA_CONF libvirt rbd_secret_uuid ""
sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL \
--yes-i-really-really-mean-it > /dev/null 2>&1
fi
# Clean up the disk image and mount that we created
destroy_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR}
}
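# cleanup_ceph_embedded() - Kill the local Ceph daemons, unmount and remove
# the data directory, loopback image and configuration files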
function cleanup_ceph_embedded {
sudo killall -w -9 ceph-mon ceph-osd ceph-mds
if [ "$ENABLE_CEPH_RGW" = "True" ]; then
sudo killall -w -9 radosgw
fi
sudo rm -rf ${CEPH_DATA_DIR}/*/*
if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
sudo umount ${CEPH_DATA_DIR}
fi
if [[ -e ${CEPH_DISK_IMAGE} ]]; then
sudo rm -f ${CEPH_DISK_IMAGE}
fi
# purge ceph config file and keys
sudo rm -rf ${CEPH_CONF_DIR}/*
}
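# cleanup_ceph_general() - Undefine the libvirt secret, tear down NFS-Ganesha
# (for the cephfsnfs driver), remove the CephFS filesystem, pools and Manila
# user, and drop the Ceph package repository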
function cleanup_ceph_general {
_undefine_virsh_secret
if is_ceph_enabled_for_service manila && [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
cleanup_nfs_ganesha
cleanup_repo_nfsganesha
fi
if is_ceph_enabled_for_service manila; then
sudo ceph -c ${CEPH_CONF_FILE} fs rm $CEPHFS_FILESYSTEM \
--yes-i-really-mean-it
sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CEPHFS_METADATA_POOL $CEPHFS_METADATA_POOL \
--yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph -c ${CEPH_CONF_FILE} osd pool delete $CEPHFS_DATA_POOL $CEPHFS_DATA_POOL \
--yes-i-really-really-mean-it > /dev/null 2>&1
sudo ceph -c ${CEPH_CONF_FILE} auth del client.$MANILA_CEPH_USER > /dev/null 2>&1
fi
cleanup_repo_ceph
}
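# cleanup_containerized_ceph() - Remove the ceph-demo container along with
# its configuration and data directories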
function cleanup_containerized_ceph {
sudo docker rm -f ceph-demo
sudo rm -rf ${CEPH_CONF_DIR}/*
sudo rm -rf ${CEPH_DATA_DIR}
}
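# initial_configure_ceph() - Create the loopback backing disk and populate
# the Ceph data directory layout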
function initial_configure_ceph {
# create a backing file disk
create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
# populate ceph directory
sudo mkdir -p \
${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,bootstrap-mgr,bootstrap-rgw,mgr,rgw,mds,mon,osd,tmp,radosgw}
}
# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
local count=0
initial_configure_ceph
# create ceph monitor initial key and directory
sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) \
--create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
--cap mon 'allow *'
# gen admin keyring, gen client.admin user and add user to keyring
sudo ceph-authtool ${CEPH_CONF_DIR}/ceph.client.admin.keyring \
--create-keyring --gen-key -n client.admin --cap mon 'allow *' \
--cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
# add gen keys to ceph.mon.keyring
sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) \
--import-keyring ${CEPH_CONF_DIR}/ceph.client.admin.keyring
sudo mkdir -p ${CEPH_DATA_DIR}/mon/ceph-$(hostname)
# create a default ceph configuration file
iniset -sudo ${CEPH_CONF_FILE} global "fsid" "${CEPH_FSID}"
iniset -sudo ${CEPH_CONF_FILE} global "mon_initial_members" "$(hostname)"
iniset -sudo ${CEPH_CONF_FILE} global "mon_host" "${SERVICE_HOST}"
iniset -sudo ${CEPH_CONF_FILE} global "auth_cluster_required" "cephx"
iniset -sudo ${CEPH_CONF_FILE} global "auth_service_required" "cephx"
iniset -sudo ${CEPH_CONF_FILE} global "auth_client_required" "cephx"
iniset -sudo ${CEPH_CONF_FILE} global "filestore_xattr_use_omap" "true"
iniset -sudo ${CEPH_CONF_FILE} global "osd crush chooseleaf type" "0"
iniset -sudo ${CEPH_CONF_FILE} global "osd journal size" "100"
iniset -sudo ${CEPH_CONF_FILE} global "osd pool default size" "${CEPH_REPLICAS}"
iniset -sudo ${CEPH_CONF_FILE} global "rbd default features" "${CEPH_RBD_DEFAULT_FEATURES}"
iniset -sudo ${CEPH_CONF_FILE} client "debug_client" "10"
local gigs
gigs=$(echo $CEPH_LOOPBACK_DISK_SIZE | grep -o '^[0-9]*')
iniset -sudo ${CEPH_CONF_FILE} global "bluestore_block_size" $((($gigs - 4) << 30))
# bootstrap the ceph monitor
sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
--keyring ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname)
sudo chown -R ceph. ${CEPH_DATA_DIR}
sudo systemctl enable ceph-mon@$(hostname)
sudo systemctl start ceph-mon@$(hostname)
local ceph_version
ceph_version=$(_get_ceph_version mon)
if vercmp "$ceph_version" ">=" "14.0"; then
for key in bootstrap-{mds,osd,rgw}; do
sudo ceph auth get client.$key -o ${CEPH_DATA_DIR}/$key/ceph.keyring
done
fi
sudo mkdir -p ${CEPH_DATA_DIR}/mgr/ceph-${MGR_ID}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create mgr.${MGR_ID} \
mon 'allow profile mgr' mds 'allow *' osd 'allow *' \
-o ${CEPH_DATA_DIR}/mgr/ceph-${MGR_ID}/keyring
sudo chown -R ceph. ${CEPH_DATA_DIR}/mgr
# create a simple rule to take OSDs instead of hosts with CRUSH
# then apply this rule to the default pool
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} \
osd crush rule create-simple devstack default osd
RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} \
osd crush rule dump devstack | \
awk '/rule_id/ {print $2}' | \
cut -d ',' -f1)
sudo ceph -c ${CEPH_CONF_FILE} \
osd pool set rbd crush_ruleset ${RULE_ID}
sudo ceph -c ${CEPH_CONF_FILE} \
osd pool set data crush_ruleset ${RULE_ID}
sudo ceph -c ${CEPH_CONF_FILE} \
osd pool set metadata crush_ruleset ${RULE_ID}
fi
# create the OSD(s)
for rep in ${CEPH_REPLICAS_SEQ}; do
OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
mon 'allow profile osd ' osd 'allow *' | \
sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
sudo chown ceph. ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
sudo chown ceph. ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
sudo ceph-osd -c ${CEPH_CONF_FILE} --setuser ceph --setgroup ceph -i ${OSD_ID} --mkfs
sudo systemctl enable ceph-osd@${OSD_ID}
done
if [ ! -z "$CEPH_MIN_CLIENT_VERSION" ]; then
sudo ceph -c ${CEPH_CONF_FILE} \
osd set-require-min-compat-client ${CEPH_MIN_CLIENT_VERSION}
fi
if is_ceph_enabled_for_service manila; then
# create a MDS
sudo mkdir -p ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}
sudo chown ceph. ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create mds.${MDS_ID} \
mon 'allow profile mds ' osd 'allow rw' mds 'allow' \
-o ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring
sudo chown ceph. ${CEPH_DATA_DIR}/mds/ceph-${MDS_ID}/keyring
sudo systemctl enable ceph-mds@${MDS_ID}
fi
if [ "$ENABLE_CEPH_RGW" = "True" ]; then
_configure_ceph_rgw
fi
if [ "$ENABLE_CEPH_ISCSI" = "True" ]; then
_configure_ceph_iscsi
fi
}
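# _configure_rgw_ceph_section() - Populate the radosgw (${key}) section of
# ceph.conf, including the Keystone auth settings; ${key} and ${dest} are set
# by configure_ceph_embedded_rgw_paths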
function _configure_rgw_ceph_section {
configure_ceph_embedded_rgw_paths
iniset -sudo ${CEPH_CONF_FILE} ${key} "host" "$(hostname)"
iniset -sudo ${CEPH_CONF_FILE} ${key} "keyring" "${dest}/keyring"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw socket path" "/tmp/radosgw-$(hostname).sock"
iniset -sudo ${CEPH_CONF_FILE} ${key} "log file" "/var/log/ceph/radosgw-$(hostname).log"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw data" "${dest}"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw print continue" "false"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw frontends" "civetweb port=${CEPH_RGW_PORT}"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone url" "$KEYSTONE_SERVICE_URI"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw s3 auth use keystone" "true"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin user" "radosgw"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin password" "$SERVICE_PASSWORD"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone accepted roles" "Member, _member_, admin, ResellerAdmin"
if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then
iniset -sudo ${CEPH_CONF_FILE} ${key} "nss db path" "${dest}/nss"
else
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone verify ssl" "false"
fi
if [[ $CEPH_RGW_IDENTITY_API_VERSION == '2.0' ]]; then
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin tenant" "$SERVICE_PROJECT_NAME"
else
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin project" "$SERVICE_PROJECT_NAME"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone admin domain" "$SERVICE_DOMAIN_NAME"
iniset -sudo ${CEPH_CONF_FILE} ${key} "rgw keystone api version" "3"
fi
}
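# _configure_ceph_rgw_container() - Write the radosgw configuration and
# restart the ceph-demo container so it is picked up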
function _configure_ceph_rgw_container {
_configure_rgw_ceph_section
sudo docker restart ceph-demo
}
function _configure_ceph_rgw {
# bootstrap rados gateway
_configure_rgw_ceph_section
sudo mkdir -p $dest
sudo ceph auth get-or-create $key \
osd 'allow rwx' mon 'allow rw' \
-o ${dest}/keyring
sudo systemctl enable ceph-radosgw@rgw.$(hostname)
sudo chown -R ceph. ${CEPH_DATA_DIR}
}
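# _configure_ceph_iscsi_gateway() - Write the gateway, API and trusted-IP
# settings into ${CEPH_ISCSI_GATEWAY_CFG}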
function _configure_ceph_iscsi_gateway {
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "minimum_gateways" $CEPH_ISCSI_MINIMUM_GATEWAYS
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "trusted_ip_list" "$HOST_IP,localhost"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "cluster_name" "ceph"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "gateway_keyring" "ceph.client.admin.keyring"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "prometheus_host" "$CEPH_ISCSI_API_HOST"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_secure" "false"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_user" "$CEPH_ISCSI_API_USER"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_password" "$CEPH_ISCSI_API_PASSWORD"
iniset -sudo ${CEPH_ISCSI_GATEWAY_CFG} config "api_port" "$CEPH_ISCSI_API_PORT"
}
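# _configure_ceph_iscsi() - Write the gateway configuration, create the 'rbd'
# pool required by gwcli and enable the tcmu-runner, rbd-target-gw and
# rbd-target-api services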
function _configure_ceph_iscsi {
_configure_ceph_iscsi_gateway
sudo ceph -c ${CEPH_CONF_FILE} \
osd pool create ${CEPH_ISCSI_POOL} ${CEPH_ISCSI_POOL_PG}
sudo systemctl daemon-reload
sudo systemctl enable tcmu-runner
sudo systemctl enable rbd-target-gw
sudo systemctl enable rbd-target-api
}
function _post_start_configure_iscsi_gateway {
# Now we set up the rbd-target-gw and rbd-target-api for use
GWCLI=$(which gwcli)
removeme=$(sudo systemctl status rbd-target-api)
FQDN=$(hostname -f)
# create the target_iqn for exporting all volumes
sudo $GWCLI /iscsi-targets create $CEPH_ISCSI_TARGET_IQN
# now we add the gateway definition
# Didn't find the gateway, so let's create it
sudo $GWCLI /iscsi-targets/$CEPH_ISCSI_TARGET_IQN/gateways create $FQDN $HOST_IP skipchecks=true
}
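# start_ceph_iscsi() - Start the tcmu-runner, rbd-target-gw and rbd-target-api
# services, then create the target IQN and gateway definition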
function start_ceph_iscsi {
sudo systemctl start tcmu-runner
sudo systemctl start rbd-target-gw
sudo systemctl start rbd-target-api
sleep 10
# we have to set up the gateway and api after they start
_post_start_configure_iscsi_gateway
}
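# stop_ceph_iscsi() - Delete the gateway and target IQN, then stop the
# iSCSI services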
function stop_ceph_iscsi {
GWCLI=$(which gwcli)
FQDN=$(hostname -f)
sudo $GWCLI /iscsi-targets/$CEPH_ISCSI_TARGET_IQN/gateways delete $FQDN confirm=true
sudo $GWCLI /iscsi-targets delete $CEPH_ISCSI_TARGET_IQN
sudo systemctl stop rbd-target-api
sudo systemctl stop rbd-target-gw
sudo systemctl stop tcmu-runner
}
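# _create_swift_endpoint() - Register a Swift object-store service and
# endpoint in Keystone that points at the radosgw port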
function _create_swift_endpoint {
local swift_service
swift_service=$(get_or_create_service "swift" "object-store" "Swift Service")
local swift_endpoint
swift_endpoint="$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:${CEPH_RGW_PORT}/swift/v1"
get_or_create_endpoint $swift_service \
"$REGION_NAME" $swift_endpoint $swift_endpoint $swift_endpoint
}
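# configure_ceph_embedded_rgw_paths() - Set the global ${dest} data path and
# ${key} cephx name for radosgw, depending on whether Ceph is containerized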
function configure_ceph_embedded_rgw_paths {
if [[ "$CEPH_CONTAINERIZED" == "True" ]]; then
dest=${CEPH_DATA_DIR}/radosgw/$(hostname)
key=client.radosgw.gateway
else
dest=${CEPH_DATA_DIR}/radosgw/ceph-rgw.$(hostname)
key=client.rgw.$(hostname)
fi
}
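# configure_ceph_embedded_rgw() - Create the Swift endpoint and the radosgw
# service user, and populate the NSS database when Keystone SSL is enabled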
function configure_ceph_embedded_rgw {
configure_ceph_embedded_rgw_paths
# keystone endpoint for radosgw
_create_swift_endpoint
# Create radosgw service user with admin privileges
create_service_user "radosgw" "admin"
if [ "$CEPH_RGW_KEYSTONE_SSL" = "True" ]; then
# radosgw needs to access keystone's revocation list
sudo mkdir -p ${dest}/nss
sudo openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
sudo certutil -d ${dest}/nss -A -n ca -t "TCu,Cu,Tuw"
sudo openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
sudo certutil -A -d ${dest}/nss -n signing_cert -t "P,P,P"
fi
}
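# start_ceph_embedded_rgw() - Enable and start the ceph-radosgw systemd unit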
function start_ceph_embedded_rgw {
sudo systemctl enable ceph-radosgw@rgw.$(hostname)
sudo systemctl start ceph-radosgw@rgw.$(hostname)
}
function configure_ceph_embedded_glance {
# configure Glance service options, ceph pool, ceph user and ceph key
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \
set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
if [[ "$GLANCE_RGW_BACKEND" = "True" && "$ENABLE_CEPH_RGW" = "True" ]]; then
# common glance accounts for swift
create_service_user "glance-swift" "ResellerAdmin"
iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_PROJECT_NAME:glance-swift
AUTH_URL=$KEYSTONE_SERVICE_URI/v$CEPH_RGW_IDENTITY_API_VERSION
iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $AUTH_URL
iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version $CEPH_RGW_IDENTITY_API_VERSION
iniset $GLANCE_API_CONF glance_store default_store swift
iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True
iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
else
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \
${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth \
get-or-create client.${GLANCE_CEPH_USER} \
mon "allow r" \
osd "allow class-read object_prefix rbd_children, \
allow rx pool=${CINDER_CEPH_POOL}, \
allow rx pool=${NOVA_CEPH_POOL}, \
allow rwx pool=${GLANCE_CEPH_POOL}" | \
sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) \
${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
iniset $GLANCE_API_CONF DEFAULT show_multiple_locations True
iniset $GLANCE_API_CONF glance_store default_store rbd
iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
fi
}
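# configure_ceph_manila() - Create the CephFS metadata and data pools and the
# filesystem, create the Manila cephx user, enable CephFS snapshots and, for
# the cephfsnfs driver, set up NFS-Ganesha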
function configure_ceph_manila {
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_METADATA_POOL} \
${CEPHFS_POOL_PG}
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create ${CEPHFS_DATA_POOL} \
${CEPHFS_POOL_PG}
if [[ ${CEPHFS_MULTIPLE_FILESYSTEMS} == 'True' ]]; then
sudo ceph -c ${CEPH_CONF_FILE} fs flag set enable_multiple true \
--yes-i-really-mean-it
fi
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} fs new ${CEPHFS_FILESYSTEM} ${CEPHFS_METADATA_POOL} \
${CEPHFS_DATA_POOL}
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth get-or-create \
client.${MANILA_CEPH_USER} \
mon "allow *" osd "allow rw" mds "allow *" mgr "allow *" \
-o ${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) \
${CEPH_CONF_DIR}/ceph.client.${MANILA_CEPH_USER}.keyring
# Enable snapshots in CephFS.
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} fs set ${CEPHFS_FILESYSTEM} allow_new_snaps true \
--yes-i-really-mean-it
# Make manila's libcephfs client a root user.
iniset -sudo ${CEPH_CONF_FILE} client.${MANILA_CEPH_USER} "client mount uid" "0"
iniset -sudo ${CEPH_CONF_FILE} client.${MANILA_CEPH_USER} "client mount gid" "0"
if [ $MANILA_CEPH_DRIVER == 'cephfsnfs' ]; then
configure_nfs_ganesha
# The NFS-Ganesha server cannot run alongside the kernel NFS server.
sudo systemctl stop nfs-server || true
sudo systemctl disable nfs-server || true
sudo systemctl enable nfs-ganesha
sudo systemctl start nfs-ganesha || (
echo "Ganesha didn't start. Let's debug..." >&2
sudo systemctl status nfs-ganesha || true
echo "**Ganesha conf file**" >&2
sudo cat /etc/ganesha/ganesha.conf || true
echo "**Ganesha log file**" >&2
sudo cat /var/log/ganesha/ganesha.log || true
echo "**Exiting**" >&2
exit 1
)
echo "Ganesha started successfully!" >&2
fi
# RESTART DOCKER CONTAINER
}
function configure_nfs_ganesha {
# Configure NFS-Ganesha to work with Manila's CephFS driver
sudo mkdir -p /etc/ganesha/export.d
if [ $MANILA_CEPH_GANESHA_RADOS_STORE == 'True' ]; then
# Create an empty placeholder ganesha export index object
echo | sudo rados -p ${CEPHFS_DATA_POOL} put ganesha-export-index -
cat <<EOF | sudo tee /etc/ganesha/ganesha.conf>/dev/null
RADOS_URLS {
ceph_conf = ${CEPH_CONF_FILE};
userid = admin;
}
CACHEINODE {
Dir_Max = 1;
Dir_Chunk = 0;
Cache_FDs = false;
NParts = 1;
Cache_Size = 1;
}
EXPORT_DEFAULTS {
Attr_Expiration_Time = 0;
}
%url rados://${CEPHFS_DATA_POOL}/ganesha-export-index
EOF
else
sudo touch /etc/ganesha/export.d/INDEX.conf
echo "%include /etc/ganesha/export.d/INDEX.conf" | sudo tee /etc/ganesha/ganesha.conf
fi
}
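# cleanup_nfs_ganesha() - Stop and disable the nfs-ganesha service and
# uninstall its packages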
function cleanup_nfs_ganesha {
sudo systemctl stop nfs-ganesha
sudo systemctl disable nfs-ganesha
sudo uninstall_package nfs-ganesha nfs-ganesha-ceph libntirpc3 nfs-ganesha-rados-urls nfs-ganesha-vfs
}
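# configure_ceph_embedded_manila() - Apply the devstack CRUSH rule to the
# CephFS pools when more than one replica is configured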
function configure_ceph_embedded_manila {
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool set ${CEPHFS_DATA_POOL} \
crush_ruleset ${RULE_ID}
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool set ${CEPHFS_METADATA_POOL} \
crush_ruleset ${RULE_ID}
fi
}
function configure_ceph_embedded_nova {
# configure Nova service options, ceph pool, ceph user and ceph key
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \
set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
# When REMOTE_CEPH=True is set on subnodes, skip creating the nova pool;
# it has already been created on the controller, which has
# REMOTE_CEPH=False.
if [[ "$REMOTE_CEPH" == "False" ]]; then
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \
${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
fi
iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
iniset $NOVA_CONF libvirt inject_key false
iniset $NOVA_CONF libvirt inject_partition -2
iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
iniset $NOVA_CONF libvirt images_type rbd
iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
if ! is_ceph_enabled_for_service cinder; then
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} \
auth get-or-create client.${CINDER_CEPH_USER} \
mon "allow r" \
osd "allow class-read object_prefix rbd_children, \
allow rwx pool=${CINDER_CEPH_POOL}, \
allow rwx pool=${NOVA_CEPH_POOL}, \
allow rwx pool=${GLANCE_CEPH_POOL}" | \
sudo tee \
${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring \
> /dev/null
sudo chown ${STACK_USER}:$(id -g -n $whoami) \
${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
fi
}
function configure_ceph_embedded_cinder {
# Configure Cinder service options, ceph pool, ceph user and ceph key
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool \
set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
}
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} osd pool create \
${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
sudo $DOCKER_EXEC ceph -c ${CEPH_CONF_FILE} auth get-or-create \
client.${CINDER_CEPH_USER} \
mon "allow r" \
osd "allow class-read object_prefix rbd_children, \
allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, \
allow rwx pool=${GLANCE_CEPH_POOL}" | \
sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) \
${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
sudo rbd pool init ${CINDER_CEPH_POOL}
}
# install_ceph_remote() - Collect source and prepare
function install_ceph_remote {
install_package ceph-common
# ceph-common in Bionic (18.04) installs only the python2 variants of
# required packages, meaning we need to install the python3 variants
# manually. Hopefully this won't be necessary in Focal (20.04)
# https://packages.ubuntu.com/bionic/ceph-common
if python3_enabled; then
install_package python3-cephfs python3-prettytable python3-rados python3-rbd python3-requests
fi
# Since pip10, pip will refuse to uninstall files from packages
# that were created with distutils (rather than more modern
# setuptools). This is because it technically doesn't have a
# manifest of what to remove. However, in most cases, simply
# overwriting works. So this hacks around those packages that
# have been dragged in by some other system dependency
if is_ubuntu; then
sudo rm -rf /usr/lib/python3/dist-packages/logutils*.egg-info
fi
if is_fedora; then
sudo rm -rf /usr/lib64/python3*/site-packages/logutils*.egg-info
fi
}
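# dnf_add_repository_ceph() - Write a ceph.repo pointing at download.ceph.com
# for the given Ceph and distro releases and register it with dnf config-manager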
function dnf_add_repository_ceph {
local ceph_release=$1
local package_release=$2
cat > ceph.repo <<EOF
[ceph]
name=Ceph packages for \$basearch
baseurl=https://download.ceph.com/rpm-${ceph_release}/${package_release}/\$basearch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-${ceph_release}/${package_release}/noarch
enabled=1
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
[ceph-source]
name=Ceph source packages
baseurl=https://download.ceph.com/rpm-${ceph_release}/${package_release}/SRPMS
enabled=0
priority=2
gpgcheck=1
gpgkey=https://download.ceph.com/keys/release.asc
EOF
sudo dnf config-manager --add-repo ceph.repo
}
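# dnf_add_repository_nfsganesha() - Install the CentOS release package that
# provides the NFS-Ganesha repository for the configured Ganesha release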
function dnf_add_repository_nfsganesha {
local repo=""
case $GANESHA_RELEASE in
3.*)
repo="centos-release-nfs-ganesha30" ;;
*)
repo="centos-release-nfs-ganesha4" ;;
esac
sudo dnf -y install ${repo}
}
# configure_repo_ceph() - Configure Ceph repositories
# Usage: configure_repo_ceph <package_release>
# - package_release: to override the os_RELEASE variable
function configure_repo_ceph {
package_release=${1:-$os_RELEASE}
if is_ubuntu; then
if [[ "${TEST_MASTER}" == "True" ]]; then
repo_file_name="/etc/apt/sources.list.d/ceph-master.list"
sudo wget -c "https://shaman.ceph.com/api/repos/ceph/master/latest/ubuntu/${package_release}/flavors/default/repo" -O ${repo_file_name}
else
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
sudo apt-add-repository -y "deb https://download.ceph.com/debian-${CEPH_RELEASE}/ $package_release main"
fi
sudo apt-get -y update
elif is_fedora; then
package_release="el"${package_release}
if [[ "${TEST_MASTER}" == "True" ]]; then
repo_file_name="/etc/yum.repos.d/ceph-master.repo"
sudo wget -c "https://shaman.ceph.com/api/repos/ceph/master/latest/centos/${package_release}/flavors/default/repo" -O ${repo_file_name}
sudo dnf config-manager --add-repo ${repo_file_name}
else
dnf_add_repository_ceph ${CEPH_RELEASE} ${package_release}
fi
fi
}