Configure Cinder backup driver
This patch adds a new environment variable, CINDER_BACKUP_DRIVER, for
configuring the cinder backup driver used when the c-bak service is
enabled. This makes the cinder backup driver configurable with a
pattern similar to the cinder backends. Although none of the currently
configurable backup drivers needs a cleanup function, the cleanup
interface is prepared for future use.
The following backup drivers can be configured:
swift:
This is the default backup driver.
ceph:
This could already be configured when the ceph backend driver is
enabled. For backward compatibility, the ceph backup driver is still
used if the ceph backend driver is enabled and no backup driver is
specified.
s3_swift:
The s3 backup driver becomes configurable with this patch. Specifying
's3_swift' configures the driver against the swift s3api.
In the future, lib/cinder_backups/s3 should be created separately for
external S3-compatible storage. That file will only set the given
parameters, such as an endpoint URL and credentials.
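For example, assuming the usual DevStack service names, a local.conf
along the following lines should enable the c-bak service with the s3
backup driver backed by the swift s3api:

  [[local|localrc]]
  enable_service c-bak
  enable_service s3api s-proxy s-object s-container s-account
  CINDER_BACKUP_DRIVER=s3_swift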
Change-Id: I356c224d938e1aa59c8589387a03682b3ec6e23d
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
new file mode 100644
index 0000000..26136be
--- /dev/null
+++ b/lib/cinder_backups/ceph
@@ -0,0 +1,57 @@
+#!/bin/bash
+#
+# lib/cinder_backups/ceph
+# Configure the ceph backup driver
+
+# Enable with:
+#
+# CINDER_BACKUP_DRIVER=ceph
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_CEPH=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+CINDER_BAK_CEPH_POOL=${CINDER_BAK_CEPH_POOL:-backups}
+CINDER_BAK_CEPH_POOL_PG=${CINDER_BAK_CEPH_POOL_PG:-8}
+CINDER_BAK_CEPH_POOL_PGP=${CINDER_BAK_CEPH_POOL_PGP:-8}
+CINDER_BAK_CEPH_USER=${CINDER_BAK_CEPH_USER:-cinder-bak}
+
+
+function configure_cinder_backup_ceph {
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+ if [ "$REMOTE_CEPH" = "False" ]; then
+ # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
+ fi
+ sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+ sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
+ iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
+ iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
+ iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
+ iniset $CINDER_CONF DEFAULT backup_ceph_stripe_count 0
+ iniset $CINDER_CONF DEFAULT restore_discard_excess_bytes True
+}
+
+# init_cinder_backup_ceph: nothing to do
+# cleanup_cinder_backup_ceph: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_CEPH
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/s3_swift b/lib/cinder_backups/s3_swift
new file mode 100644
index 0000000..6fb2486
--- /dev/null
+++ b/lib/cinder_backups/s3_swift
@@ -0,0 +1,45 @@
+#!/bin/bash
+#
+# lib/cinder_backups/s3_swift
+# Configure the s3 backup driver with swift s3api
+#
+# TODO: create lib/cinder_backups/s3 for external S3-compatible storage
+
+# Enable with:
+#
+# CINDER_BACKUP_DRIVER=s3_swift
+# enable_service s3api s-proxy s-object s-container s-account
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_S3_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+function configure_cinder_backup_s3_swift {
+    # This configuration requires swift and s3api. If we're on a subnode we
+    # might not know if they are enabled, but assume the controller provides them.
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.s3.S3BackupDriver"
+ iniset $CINDER_CONF DEFAULT backup_s3_endpoint_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$S3_SERVICE_PORT"
+}
+
+function init_cinder_backup_s3_swift {
+    openstack ec2 credentials create
+    iniset $CINDER_CONF DEFAULT backup_s3_store_access_key "$(openstack ec2 credentials list -c Access -f value)"
+    iniset $CINDER_CONF DEFAULT backup_s3_store_secret_key "$(openstack ec2 credentials list -c Secret -f value)"
+ if is_service_enabled tls-proxy; then
+ iniset $CINDER_CONF DEFAULT backup_s3_ca_cert_file "$SSL_BUNDLE_FILE"
+ fi
+}
+
+# cleanup_cinder_backup_s3_swift: nothing to do
+
+# Restore xtrace
+$_XTRACE_CINDER_S3_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_backups/swift b/lib/cinder_backups/swift
new file mode 100644
index 0000000..d7c977e
--- /dev/null
+++ b/lib/cinder_backups/swift
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# lib/cinder_backups/swift
+# Configure the swift backup driver
+
+# Enable with:
+#
+# CINDER_BACKUP_DRIVER=swift
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# Save trace setting
+_XTRACE_CINDER_SWIFT=$(set +o | grep xtrace)
+set +o xtrace
+
+
+function configure_cinder_backup_swift {
+ # NOTE(mriedem): The default backup driver uses swift and if we're
+ # on a subnode we might not know if swift is enabled, but chances are
+ # good that it is on the controller so configure the backup service
+ # to use it.
+ iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.swift.SwiftBackupDriver"
+ iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
+}
+
+# init_cinder_backup_swift: nothing to do
+# cleanup_cinder_backup_swift: nothing to do
+
+
+# Restore xtrace
+$_XTRACE_CINDER_SWIFT
+
+# Local variables:
+# mode: shell-script
+# End: