Rely on ceph.conf settings when cinder backup pool is created

devstack-plugin-ceph writes the osd pool default size option into
ceph.conf via [1]; since that default comes from the same variable
(CEPH_REPLICAS), there is no need to set the size of this pool
explicitly. This change removes the explicit size setting and relies
on the default provided by ceph.conf.

[1] https://github.com/openstack/devstack-plugin-ceph/blob/master/devstack/lib/ceph#L425
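For illustration only (not part of the patch), a rough sketch of the
intended effect, assuming [1] still writes the replica count into the
[global] section of ceph.conf:

    # ceph.conf (written by devstack-plugin-ceph, see [1]):
    #   [global]
    #   osd pool default size = ${CEPH_REPLICAS}
    #
    # A pool created afterwards inherits that size, so the explicit
    # "osd pool set ... size" call becomes redundant:
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool get ${CINDER_BAK_CEPH_POOL} size  # expected: size: ${CEPH_REPLICAS}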

Change-Id: I5fa2105ceb3b97a4e38926d76c1e4028f1108d4a
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
index 26136be..e4003c0 100644
--- a/lib/cinder_backups/ceph
+++ b/lib/cinder_backups/ceph
@@ -27,12 +27,8 @@
 
 function configure_cinder_backup_ceph {
     sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-    if [ "$REMOTE_CEPH" = "False" ]; then
-        # Configure Cinder backup service options, ceph pool, ceph user and ceph key
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
-        if [[ $CEPH_REPLICAS -ne 1 ]]; then
-            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
-        fi
+    if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
     fi
     sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
     sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring