# lib/cinder_backends/ceph
# Configure the ceph backend

# Enable with:
#
# CINDER_ENABLED_BACKENDS+=,ceph:ceph
#
# Optional parameters:
# CINDER_BAK_CEPH_POOL=<pool-name>
# CINDER_BAK_CEPH_USER=<user>
# CINDER_BAK_CEPH_POOL_PG=<pg-num>
# CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
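#
# Example localrc snippet (illustrative values; the defaults below
# apply when these are left unset):
#
# CINDER_ENABLED_BACKENDS+=,ceph:ceph
# CINDER_BAK_CEPH_POOL=backups
# CINDER_BAK_CEPH_USER=cinder-bak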

# Dependencies:
#
# - ``functions`` file
# - ``cinder`` configurations

# configure_cinder_backend_ceph - called from configure_cinder()


# Save trace setting
MY_XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

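# These defaults size the Ceph pool used by the cinder-backup (c-bak)
# service; the small PG/PGP counts are only suitable for a small
# development deployment.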
CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}


# Entry Points
# ------------

# configure_cinder_backend_ceph - Set config files, create data dirs, etc
# configure_cinder_backend_ceph $name
function configure_cinder_backend_ceph {
    local be_name=$1

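    # Point the backend stanza at the RBD driver, using the pool, cephx
    # user and secret UUID that are expected to be provisioned elsewhere
    # (CINDER_CEPH_POOL, CINDER_CEPH_USER and CINDER_CEPH_UUID come from
    # the ceph setup code, e.g. lib/ceph).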
    iniset $CINDER_CONF $be_name volume_backend_name $be_name
    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
    iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
    iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
    iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
    iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
    iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
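    # The RBD driver needs the Glance v2 API to create copy-on-write
    # clones of RBD-backed images instead of downloading them.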
    iniset $CINDER_CONF DEFAULT glance_api_version 2

    if is_service_enabled c-bak; then
        # Configure Cinder backup service options, ceph pool, ceph user and ceph key
        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
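        # With more than one replica, pin the pool to the CRUSH ruleset
        # assumed to have been created by the ceph setup code (RULE_ID).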
        if [[ $CEPH_REPLICAS -ne 1 ]]; then
            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
        fi
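        # Create a cephx user for backups: read-only access to the
        # monitors, read/write/execute on the backup pool, and class-read
        # on rbd_children so clone parent/child relationships can be
        # looked up.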
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring

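        # Point cinder-backup at the new pool and user; a stripe unit and
        # count of 0 keeps the backup images' default striping.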
        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
    fi
}

# Restore xtrace
$MY_XTRACE

# Local variables:
# mode: shell-script
# End: