# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``;
# an illustrative dispatch sketch follows the list):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph
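#
# A minimal sketch of how such an ``extras.d`` plugin typically dispatches
# these entry points (illustrative only; the phase names below follow the
# usual DevStack plugin pattern and are not copied from the real
# ``extras.d/60-ceph.sh``):
#
#     if is_service_enabled ceph; then
#         if [[ "$1" == "stack" && "$2" == "install" ]]; then
#             install_ceph
#         elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
#             configure_ceph
#             init_ceph
#             start_ceph
#         elif [[ "$1" == "unstack" ]]; then
#             stop_ceph
#         elif [[ "$1" == "clean" ]]; then
#             cleanup_ceph
#         fi
#     fi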

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is ``/var/lib/ceph``.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size,
# including a unit suffix (e.g. ``4G``).
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
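
# For example, a larger backing file can be requested from ``local.conf``
# (the value shown is purely illustrative):
#
#     [[local|localrc]]
#     CEPH_LOOPBACK_DISK_SIZE=8G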

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to the number of data replicas the Ceph cluster
# should keep. The default is a single replica, which is far less CPU- and
# memory-intensive; increase this value if you plan to test Ceph replication.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
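
# For example, to exercise replication with two copies on two local OSDs,
# one might set this in ``local.conf`` (the value shown is illustrative):
#
#     [[local|localrc]]
#     CEPH_REPLICAS=2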

# Functions
# ---------

# get_ceph_version() - Return the installed Ceph version (major.minor) as
# reported by the local monitor's admin socket
function get_ceph_version {
    local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
    echo $ceph_version_str
}
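
# Example of the value returned (illustrative; the actual string depends on
# the installed release, e.g. 0.80 for Firefly or 0.87 for Giant):
#
#     $ get_ceph_version
#     0.80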

# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
    <uuid>${CINDER_CEPH_UUID}</uuid>
    <usage type='ceph'>
        <name>client.${CINDER_CEPH_USER} secret</name>
    </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
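
# A quick way to verify the import took effect (illustrative commands only,
# not called from this library):
#
#     sudo virsh secret-list
#     sudo virsh secret-get-value --secret ${CINDER_CEPH_UUID}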

# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph {
    sudo pkill -f ceph-mon
    sudo pkill -f ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    sudo rm -rf ${CEPH_CONF_DIR}/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
    if is_service_enabled cinder || is_service_enabled nova; then
        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
    fi
    if is_service_enabled nova; then
        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
    fi
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF
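
    # Note: ``osd crush chooseleaf type = 0`` tells CRUSH to separate replicas
    # across OSDs rather than across hosts, which is what allows a multi-OSD,
    # single-host loop-back deployment like this one to reach a healthy state.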

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
    if is_ubuntu; then
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to come up, otherwise the commands below will fail
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # the ``data`` and ``metadata`` pools were removed in the Giant release (0.87),
    # so the set of pools to resize depends on the installed version
    local ceph_version=$(get_ceph_version)
    # change the pool replica size according to the CEPH_REPLICAS set by the user
    if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
    else
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    fi
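
    # Sanity check (illustrative, not executed here): the replica count can be
    # confirmed afterwards with
    #     sudo ceph -c ${CEPH_CONF_FILE} osd pool get rbd size
    # which should report "size: ${CEPH_REPLICAS}".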

    # create a simple CRUSH rule that distributes replicas across OSDs rather
    # than across hosts, then apply that rule to the default pools
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # Ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking for
        # a file named 'upstart' or 'sysvinit'; touching the right one lets the init
        # system manage the OSD daemons.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}
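
# Once configure_ceph has run, the state of the freshly bootstrapped cluster
# can be inspected with the standard client commands (shown here only as an
# illustration, they are not part of the setup flow):
#
#     sudo ceph -c ${CEPH_CONF_FILE} -s          # overall health and mon/osd counts
#     sudo ceph -c ${CEPH_CONF_FILE} osd tree    # the OSDs created in the loop above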

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    # NOTE(eharney): When Glance has fully migrated to Glance store,
    # default_store can be removed from [DEFAULT].  (See lib/glance.)
    iniset $GLANCE_API_CONF DEFAULT default_store rbd
    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
    iniset $GLANCE_API_CONF glance_store default_store rbd
    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
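
# The iniset calls above translate roughly into the following snippet of the
# Glance API configuration (values shown with the defaults from this file; the
# actual file is whatever $GLANCE_API_CONF points to):
#
#     [DEFAULT]
#     default_store = rbd
#     show_image_direct_url = True
#
#     [glance_store]
#     default_store = rbd
#     stores = file, http, rbd
#     rbd_store_ceph_conf = /etc/ceph/ceph.conf
#     rbd_store_user = glance
#     rbd_store_pool = images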

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_service_enabled cinder; then
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
        sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}
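
# With the defaults above, the resulting ``[libvirt]`` section of nova.conf
# looks roughly like this (the rbd_secret_uuid is generated at run time):
#
#     [libvirt]
#     rbd_user = cinder
#     rbd_secret_uuid = <value of CINDER_CEPH_UUID>
#     inject_key = false
#     inject_partition = -2
#     disk_cachemodes = network=writeback
#     images_type = rbd
#     images_rbd_pool = vms
#     images_rbd_ceph_conf = /etc/ceph/ceph.conf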

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    # configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
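
# The pool and credentials created above can be checked with the admin key
# (illustrative commands, not run by this library):
#
#     sudo ceph -c ${CEPH_CONF_FILE} osd lspools                        # should list "volumes"
#     sudo ceph -c ${CEPH_CONF_FILE} auth get client.${CINDER_CEPH_USER}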

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
}

# install_ceph() - Collect source and prepare
function install_ceph {
    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
    #                leveraging the list in stack.sh
    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
        NO_UPDATE_REPOS=False
        install_package ceph
    else
        exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
    fi
}
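
# A quick way to confirm that the distro package provides a recent enough
# release (Firefly, 0.80, or later) once install_ceph has run:
#
#     $ ceph --version
#     ceph version 0.80.x (...)        # illustrative output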

# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
    else
        sudo service ceph start
    fi
}

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}


# Restore xtrace
$XTRACE

## Local variables:
## mode: shell-script
## End: