# blob: 7e9d2d334eb4320be5686b4771785c34e8e842b7
#!/bin/bash
#
# lib/cinder_backends/ceph
# Configure the ceph backend
#
# Enable with:
#
#   CINDER_ENABLED_BACKENDS+=,ceph:ceph
#
# Optional parameters:
#   CINDER_BAK_CEPH_POOL=<pool-name>
#   CINDER_BAK_CEPH_USER=<user>
#   CINDER_BAK_CEPH_POOL_PG=<pg-num>
#   CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
#
# Dependencies:
#
# - ``functions`` file
# - ``cinder`` configurations
#
# configure_cinder_backend_ceph - called from configure_cinder()


# Save trace setting
# Save the current xtrace setting so it can be restored at the end of this
# file, then silence command tracing while these definitions are sourced.
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Ceph pool used to store Cinder volume backups (created when c-bak is enabled)
CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
# Placement group / placement-group-for-placement counts for the backup pool
CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
# Cephx user the Cinder backup driver authenticates as
CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}


# Entry Points
# ------------

# configure_cinder_backend_ceph - Set config files, create data dirs, etc
# configure_cinder_backend_ceph $name
function configure_cinder_backend_ceph {
    # Configure the named Cinder backend ($1) to use the Ceph RBD volume
    # driver, and — when the c-bak service is enabled — set up the backup
    # pool, cephx user, keyring and backup driver options.
    local be_name=$1

    iniset $CINDER_CONF $be_name volume_backend_name "$be_name"
    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF"
    iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
    iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
    iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
    iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
    iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
    # RBD cloning from Glance images requires the v2 image API
    iniset $CINDER_CONF DEFAULT glance_api_version 2

    if is_service_enabled c-bak; then
        # Dedicated pool for Cinder backups
        sudo ceph -c "${CEPH_CONF_FILE}" osd pool create "${CINDER_BAK_CEPH_POOL}" "${CINDER_BAK_CEPH_POOL_PG}" "${CINDER_BAK_CEPH_POOL_PGP}"
        if [ "$REMOTE_CEPH" = "False" ]; then
            # Only tune replication/crush on a locally managed cluster;
            # a remote cluster's pools are administered externally.
            sudo ceph -c "${CEPH_CONF_FILE}" osd pool set "${CINDER_BAK_CEPH_POOL}" size "${CEPH_REPLICAS}"
            if [[ $CEPH_REPLICAS -ne 1 ]]; then
                sudo ceph -c "${CEPH_CONF_FILE}" osd pool set "${CINDER_BAK_CEPH_POOL}" crush_ruleset "${RULE_ID}"
            fi
        fi
        # Create a cephx user limited to the backup pool and install its
        # keyring where the local cinder-backup service can read it.
        sudo ceph -c "${CEPH_CONF_FILE}" auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee "${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring"
        sudo chown "$(whoami):$(whoami)" "${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring"

        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF"
        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
    fi
}

# Restore the xtrace setting saved at the top of this file
$MY_XTRACE

# Local variables:
# mode: shell-script
# End: