Merge "Implement Ceph backend for Glance / Cinder / Nova"
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
new file mode 100644
index 0000000..5fb34ea
--- /dev/null
+++ b/extras.d/60-ceph.sh
@@ -0,0 +1,44 @@
+# ceph.sh - DevStack extras script to install Ceph
+
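+# To pull this script in, enable the Ceph service in ``localrc``, e.g. via
+# the standard DevStack service enablement mechanism:
+#
+#   enable_service ceph
+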
+if is_service_enabled ceph; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/ceph
+    elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
+        echo_summary "Installing Ceph"
+        install_ceph
+        echo_summary "Configuring Ceph"
+        configure_ceph
+        # NOTE (leseb): Do everything here because we need Ceph started before the main
+        # OpenStack components. The Ceph OSDs must start here, otherwise we cannot upload any images.
+        echo_summary "Initializing Ceph"
+        init_ceph
+        start_ceph
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        if is_service_enabled glance; then
+            echo_summary "Configuring Glance for Ceph"
+            configure_ceph_glance
+        fi
+        if is_service_enabled nova; then
+            echo_summary "Configuring Nova for Ceph"
+            configure_ceph_nova
+        fi
+        if is_service_enabled cinder; then
+            echo_summary "Configuring Cinder for Ceph"
+            configure_ceph_cinder
+            # NOTE (leseb): Cinder needs the libvirt secret in order to attach volumes,
+            # so the import below must run within this if statement.
+            echo_summary "Configuring libvirt secret"
+            import_libvirt_secret_ceph
+        fi
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_ceph
+        cleanup_ceph
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_ceph
+    fi
+fi
diff --git a/files/apts/ceph b/files/apts/ceph
new file mode 100644
index 0000000..69863ab
--- /dev/null
+++ b/files/apts/ceph
@@ -0,0 +1,2 @@
+ceph    # NOPRIME
+xfsprogs
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
new file mode 100644
index 0000000..8d46500
--- /dev/null
+++ b/files/rpms-suse/ceph
@@ -0,0 +1,3 @@
+ceph    # NOPRIME
+xfsprogs
+lsb
diff --git a/files/rpms/ceph b/files/rpms/ceph
new file mode 100644
index 0000000..5483735
--- /dev/null
+++ b/files/rpms/ceph
@@ -0,0 +1,3 @@
+ceph    # NOPRIME
+xfsprogs
+redhat-lsb-core
diff --git a/functions b/functions
index ca8ef80..cd9e078 100644
--- a/functions
+++ b/functions
@@ -546,6 +546,40 @@
     }
 fi
 
+
+# create_disk - Create backing disk
+function create_disk {
+    local disk_image=${1}
+    local storage_data_dir=${2}
+    local loopback_disk_size=${3}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${disk_image} ]]; then
+        if egrep -q ${storage_data_dir} /proc/mounts; then
+            sudo umount ${storage_data_dir}
+            sudo rm -f ${disk_image}
+        fi
+    fi
+
+    sudo mkdir -p ${storage_data_dir}/drives/images
+
+    sudo truncate -s ${loopback_disk_size} ${disk_image}
+
+    # Make a fresh XFS filesystem. Use bigger inodes so xattr can fit in
+    # a single inode. Keeping the default inode size (256) will result in multiple
+    # inodes being used to store xattr. Retrieving the xattr will be slower
+    # since we have to read multiple inodes. This statement is true for both
+    # Swift and Ceph.
+    sudo mkfs.xfs -f -i size=1024 ${disk_image}
+
+    # Mount the disk with mount options to make it as efficient as possible
+    if ! egrep -q ${storage_data_dir} /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8 \
+            ${disk_image} ${storage_data_dir}
+    fi
+}
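+
+# As an example, lib/ceph in this change creates its Ceph backing disk with
+# (default values shown, for illustration only):
+#
+#   create_disk /var/lib/ceph/drives/images/ceph.img /var/lib/ceph 2G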
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/ceph b/lib/ceph
new file mode 100644
index 0000000..32a4760
--- /dev/null
+++ b/lib/ceph
@@ -0,0 +1,286 @@
+# lib/ceph
+# Functions to control the configuration and operation of the **Ceph** storage service
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
+
+# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
+#
+# - install_ceph
+# - configure_ceph
+# - init_ceph
+# - start_ceph
+# - stop_ceph
+# - cleanup_ceph
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
+# Default is the common DevStack data directory.
+CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
+CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
+
+# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
+# Default is ``/etc/ceph``.
+CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
+
+# DevStack will create a loop-back disk formatted as XFS to store the
+# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size
+# (a unit suffix is accepted, e.g. ``2G``).
+# Default is 2 gigabytes.
+CEPH_LOOPBACK_DISK_SIZE_DEFAULT=2G
+CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
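+# For example, to use a larger loopback disk, set in ``localrc``:
+#
+#   CEPH_LOOPBACK_DISK_SIZE=4G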
+
+# Common
+CEPH_FSID=$(uuidgen)
+CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
+
+# Glance
+GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
+GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
+GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
+GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
+
+# Nova
+NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
+NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
+NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
+
+# Cinder
+CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
+CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
+CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
+CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
+CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
+
+# Set ``CEPH_REPLICAS`` to the number of replicas to configure for your
+# Ceph cluster. By default we configure only one replica since this is far
+# less CPU and memory intensive. If you are planning to test Ceph
+# replication, feel free to increase this value.
+CEPH_REPLICAS=${CEPH_REPLICAS:-1}
+CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
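+# For example, to exercise replication with two copies, set in ``localrc``:
+#
+#   CEPH_REPLICAS=2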
+
+# Functions
+# ------------
+
+# import_libvirt_secret_ceph() - Imports the Cinder user's Ceph key into libvirt
+# so it can connect to the Ceph cluster while attaching a Cinder block device
+function import_libvirt_secret_ceph {
+    cat > secret.xml <<EOF
+<secret ephemeral='no' private='no'>
+   <uuid>${CINDER_CEPH_UUID}</uuid>
+   <usage type='ceph'>
+     <name>client.${CINDER_CEPH_USER} secret</name>
+   </usage>
+</secret>
+EOF
+    sudo virsh secret-define --file secret.xml
+    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
+    sudo rm -f secret.xml
+}
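+
+# The imported secret can be inspected with, e.g.:
+#
+#   sudo virsh secret-list
+#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}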
+
+# cleanup_ceph() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_ceph {
+    sudo pkill -f ceph-mon
+    sudo pkill -f ceph-osd
+    sudo rm -rf ${CEPH_DATA_DIR}/*/*
+    sudo rm -rf ${CEPH_CONF_DIR}/*
+    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
+        sudo umount ${CEPH_DATA_DIR}
+    fi
+    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
+        sudo rm -f ${CEPH_DISK_IMAGE}
+    fi
+    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
+    # undefine the libvirt secret that was imported for the Cinder user
+    VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+    sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
+}
+
+# configure_ceph() - Set config files, create data dirs, etc
+function configure_ceph {
+    local count=0
+
+    # create a backing file disk
+    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
+
+    # populate ceph directory
+    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
+
+    # create the ceph monitor initial key and directory
+    sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
+    sudo mkdir -p ${CEPH_DATA_DIR}/mon/ceph-$(hostname)
+
+    # create a default ceph configuration file
+    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+[global]
+fsid = ${CEPH_FSID}
+mon_initial_members = $(hostname)
+mon_host = ${SERVICE_HOST}
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+filestore_xattr_use_omap = true
+osd crush chooseleaf type = 0
+osd journal size = 100
+EOF
+
+    # bootstrap the ceph monitor
+    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname)
+    if is_ubuntu; then
+        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/upstart
+        sudo initctl emit ceph-mon id=$(hostname)
+    else
+        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/sysvinit
+        sudo service ceph start mon.$(hostname)
+    fi
+
+    # wait for the admin key to be generated, otherwise we will not be able to perform the actions below
+    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
+        echo_summary "Waiting for the Ceph admin key to be ready..."
+
+        count=$(($count + 1))
+        if [ $count -eq 3 ]; then
+            die $LINENO "Maximum of 3 retries reached"
+        fi
+        sleep 5
+    done
+
+    # change pool replica size according to the CEPH_REPLICAS set by the user
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
+
+    # create a simple CRUSH rule that distributes data across OSDs rather than hosts,
+    # then apply this rule to the default pools
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
+        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
+    fi
+
+    # create the OSD(s)
+    for rep in ${CEPH_REPLICAS_SEQ}; do
+        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
+        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
+        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
+
+        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking for a file
+        # named 'upstart' or 'sysvinit'; thanks to these 'touches' we are able to control
+        # the OSD daemons from the init script.
+        if is_ubuntu; then
+            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
+        else
+            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
+        fi
+    done
+}
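+
+# Once the monitor and OSD(s) are running, overall cluster health can be
+# inspected with, e.g.:
+#
+#   sudo ceph -c ${CEPH_CONF_FILE} -s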
+
+# configure_ceph_glance() - Glance config needs to come after Glance is set up
+function configure_ceph_glance {
+    # configure Glance service options, ceph pool, ceph user and ceph key
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
+    iniset $GLANCE_API_CONF DEFAULT default_store rbd
+    iniset $GLANCE_API_CONF DEFAULT rbd_store_ceph_conf $CEPH_CONF_FILE
+    iniset $GLANCE_API_CONF DEFAULT rbd_store_user $GLANCE_CEPH_USER
+    iniset $GLANCE_API_CONF DEFAULT rbd_store_pool $GLANCE_CEPH_POOL
+    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
+}
+
+# configure_ceph_nova() - Nova config needs to come after Nova is set up
+function configure_ceph_nova {
+    # configure Nova service options, ceph pool, ceph user and ceph key
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
+    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
+    iniset $NOVA_CONF libvirt inject_key false
+    iniset $NOVA_CONF libvirt inject_partition -2
+    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
+    iniset $NOVA_CONF libvirt images_type rbd
+    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
+    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
+}
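+
+# With ``images_type = rbd``, the ephemeral disks of new instances are created
+# in the Nova pool and can be listed with, e.g.:
+#
+#   sudo rbd -c ${CEPH_CONF_FILE} ls ${NOVA_CEPH_POOL}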
+
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
+function configure_ceph_cinder {
+    # Configure Cinder service options, ceph pool, ceph user and ceph key
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
+    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
+    if [[ $CEPH_REPLICAS -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
+    fi
+    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
+}
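+
+# The capabilities granted to the Cinder user can be inspected with, e.g.:
+#
+#   sudo ceph -c ${CEPH_CONF_FILE} auth get client.${CINDER_CEPH_USER}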
+
+# init_ceph() - Initialize databases, etc.
+function init_ceph {
+    # clean up from previous (possibly aborted) runs
+    # make sure to kill all ceph processes first
+    sudo pkill -f ceph-mon || true
+    sudo pkill -f ceph-osd || true
+}
+
+# install_ceph() - Collect source and prepare
+function install_ceph {
+    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
+    #                leveraging the list in stack.sh
+    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
+        NO_UPDATE_REPOS=False
+        install_package ceph
+    else
+        exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
+    fi
+}
+
+# start_ceph() - Start running processes, including screen
+function start_ceph {
+    if is_ubuntu; then
+        sudo initctl emit ceph-mon id=$(hostname)
+        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
+            sudo start ceph-osd id=${id}
+        done
+    else
+        sudo service ceph start
+    fi
+}
+
+# stop_ceph() - Stop running processes (non-screen)
+function stop_ceph {
+    if is_ubuntu; then
+        sudo service ceph-mon-all stop > /dev/null 2>&1
+        sudo service ceph-osd-all stop > /dev/null 2>&1
+    else
+        sudo service ceph stop > /dev/null 2>&1
+    fi
+}
+
+
+# Restore xtrace
+$XTRACE
+
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
new file mode 100644
index 0000000..e9d2a02
--- /dev/null
+++ b/lib/cinder_backends/ceph
@@ -0,0 +1,79 @@
+# lib/cinder_backends/ceph
+# Configure the ceph backend
+
+# Enable with:
+#
+#   CINDER_ENABLED_BACKENDS+=,ceph:ceph
+#
+# Optional parameters:
+#   CINDER_BAK_CEPH_POOL=<pool-name>
+#   CINDER_BAK_CEPH_USER=<user>
+#   CINDER_BAK_CEPH_POOL_PG=<pg-num>
+#   CINDER_BAK_CEPH_POOL_PGP=<pgp-num>
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_ceph - called from configure_cinder()
+
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_ceph - Set config files, create data dirs, etc
+# configure_cinder_backend_ceph $name
+function configure_cinder_backend_ceph {
+    local be_name=$1
+
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
+    iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
+    iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
+    iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
+    iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
+    iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
+    iniset $CINDER_CONF DEFAULT glance_api_version 2
+
+    if is_service_enabled c-bak; then
+        # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+        if [[ $CEPH_REPLICAS -ne 1 ]]; then
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        fi
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+        iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+        iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+        iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+        iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+        iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+    fi
+}
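+
+# With ``CINDER_ENABLED_BACKENDS+=,ceph:ceph`` in ``localrc``, configure_cinder()
+# invokes this entry point as, e.g.:
+#
+#   configure_cinder_backend_ceph ceph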
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End: