# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size
# (for example ``4G``).
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to the number of replicas to configure for your
# Ceph cluster. By default only one replica is configured, since that is
# far less CPU and memory intensive. If you plan to test Ceph replication,
# feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})

# Connect to an existing Ceph cluster
REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}

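# The settings above follow the usual DevStack pattern and can be overridden
# in ``localrc``/``local.conf`` before running ``stack.sh``. An illustrative
# example (the values are only suggestions, and the REMOTE_CEPH lines only
# make sense when an existing cluster is reachable):
#
#   CEPH_LOOPBACK_DISK_SIZE=8G
#   CEPH_REPLICAS=2
#   # or, to reuse an existing cluster instead of bootstrapping one:
#   REMOTE_CEPH=True
#   REMOTE_CEPH_ADMIN_KEY_PATH=/etc/ceph/ceph.client.admin.keyring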

# Functions
# ---------

function get_ceph_version {
    local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4)
    echo $ceph_version_str
}
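# Illustrative note: ``ceph daemon mon.$(hostname) version`` prints JSON such
# as {"version":"0.80.7"}; the cut on '"' above extracts "0.80.7", which
# configure_ceph later splits into major and minor parts with
# ${ceph_version%.*} and ${ceph_version##*.}.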

# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
    <uuid>${CINDER_CEPH_UUID}</uuid>
    <usage type='ceph'>
        <name>client.${CINDER_CEPH_USER} secret</name>
    </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
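# For reference, the imported secret can be checked with commands such as
# (illustrative, not run by this library):
#   sudo virsh secret-list
#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}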

# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
    if is_service_enabled cinder || is_service_enabled nova; then
        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
    fi
}


# check_os_support_ceph() - Check if the operating system provides a supported version of Ceph
function check_os_support_ceph {
    if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then
        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
        fi
        NO_UPDATE_REPOS=False
    fi
}
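# To attempt the installation anyway on an unlisted distro, set the override
# before stacking, e.g. in ``localrc``:
#   FORCE_CEPH_INSTALL=yes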

# cleanup_ceph_remote() - Remove pools and users created on a remote Ceph
# cluster, anything left over from previous runs that a clean run would need
# to clean up
function cleanup_ceph_remote {
    # do a proper cleanup from here to avoid leftovers on the remote Ceph cluster
    if is_service_enabled glance; then
        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled cinder; then
        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled c-bak; then
        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled nova; then
        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
    fi
}

# cleanup_ceph_embedded() - Stop the locally bootstrapped Ceph daemons and
# remove their data, mount point and loop-back disk image
function cleanup_ceph_embedded {
    sudo pkill -f ceph-mon
    sudo pkill -f ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
}

# cleanup_ceph_general() - Remove the installed packages, configuration files
# and keys (common to the embedded and remote cases)
function cleanup_ceph_general {
    undefine_virsh_secret
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1

    # purge ceph config file and keys
    sudo rm -rf ${CEPH_CONF_DIR}/*
}


# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF
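    # Note: "osd crush chooseleaf type = 0" makes CRUSH choose individual OSDs
    # rather than hosts as the failure domain, so replicas can be placed on
    # this single loop-back backed node.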

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
    if is_ubuntu; then
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to appear, otherwise the actions below will fail
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # the default 'data' and 'metadata' pools were removed in the Giant (0.87)
    # release, so apply different commands depending on the running version
    local ceph_version=$(get_ceph_version)
    # change pool replica size according to the CEPH_REPLICAS set by the user
    if [[ ${ceph_version%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
    else
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    fi

    # create a simple CRUSH rule that distributes replicas across OSDs rather
    # than hosts, then apply this rule to the default pools
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking
        # for a file named 'upstart' or 'sysvinit'; these touches let the OSD
        # daemons be controlled by the init script.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}
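
# For reference, after configure_ceph the bootstrap cluster can be inspected
# with commands such as (illustrative, not run by this library):
#   sudo ceph -c ${CEPH_CONF_FILE} -s
#   sudo ceph -c ${CEPH_CONF_FILE} osd tree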

function configure_ceph_embedded_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    # NOTE(eharney): When Glance has fully migrated to Glance store,
    # default_store can be removed from [DEFAULT]. (See lib/glance.)
    iniset $GLANCE_API_CONF DEFAULT default_store rbd
    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
    iniset $GLANCE_API_CONF glance_store default_store rbd
    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
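
# For reference, with the default settings in this file the iniset calls above
# produce a glance-api.conf fragment roughly like the following (illustrative):
#   [glance_store]
#   default_store = rbd
#   stores = file, http, rbd
#   rbd_store_ceph_conf = /etc/ceph/ceph.conf
#   rbd_store_user = glance
#   rbd_store_pool = images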

function configure_ceph_embedded_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_service_enabled cinder; then
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
        sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}
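
# For reference, with the defaults in this file the Nova side ends up with a
# nova.conf [libvirt] section roughly like this (illustrative; the UUID is the
# generated ${CINDER_CEPH_UUID}):
#   [libvirt]
#   images_type = rbd
#   images_rbd_pool = vms
#   images_rbd_ceph_conf = /etc/ceph/ceph.conf
#   rbd_user = cinder
#   rbd_secret_uuid = <CINDER_CEPH_UUID>
#   inject_key = false
#   inject_partition = -2
#   disk_cachemodes = network=writeback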

function configure_ceph_embedded_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
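
# Note: the Cinder RBD driver options themselves (volume_driver, rbd_pool,
# rbd_user, rbd_secret_uuid, ...) are set by the Cinder backend configuration
# rather than here; an illustrative resulting cinder.conf backend section:
#   volume_driver = cinder.volume.drivers.rbd.RBDDriver
#   rbd_pool = volumes
#   rbd_user = cinder
#   rbd_ceph_conf = /etc/ceph/ceph.conf
#   rbd_secret_uuid = <CINDER_CEPH_UUID>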

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
}

# install_ceph_remote() - Only the client packages are needed when using an existing cluster
function install_ceph_remote {
    install_package ceph-common
}

# install_ceph() - Collect source and prepare
function install_ceph {
    install_package ceph
}

# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
    else
        sudo service ceph start
    fi
}

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}


# Restore xtrace
$XTRACE

## Local variables:
## mode: shell-script
## End: