# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph

# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size
# (e.g. ``4G``).
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
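
# For example, a larger backing disk can be requested from ``localrc``
# before running ``stack.sh`` (a sketch; pick a size that fits your host):
#
#   CEPH_LOOPBACK_DISK_SIZE=8G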

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
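
# Note: the placement group counts above default to 8, which is deliberately
# tiny; it suits a single-OSD development cluster but would be far too low
# for any real deployment.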

# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
# configured for your Ceph cluster. By default we configure only
# one replica since this is far less CPU- and memory-intensive. If
# you plan to test Ceph replication, feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
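
# For example, a three-replica test cluster (one OSD is created per
# replica, see ``configure_ceph`` below) could be requested from ``localrc``:
#
#   CEPH_REPLICAS=3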

# Functions
# ---------

# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
   <uuid>${CINDER_CEPH_UUID}</uuid>
   <usage type='ceph'>
     <name>client.${CINDER_CEPH_USER} secret</name>
   </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
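
# A quick manual sanity check (not run by ``stack.sh``) to confirm the
# secret reached libvirt might look like:
#
#   sudo virsh secret-list
#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}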

# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph {
    sudo pkill -f ceph-mon
    sudo pkill -f ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    sudo rm -rf ${CEPH_CONF_DIR}/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
    # undefine every libvirt secret, not just the first one listed
    for virsh_uuid in $(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }'); do
        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
    done
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
    sudo mkdir ${CEPH_DATA_DIR}/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF
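
    # Note: ``osd crush chooseleaf type = 0`` makes CRUSH replicate across
    # OSDs instead of hosts, which is what a single-node cluster like this
    # one needs; the 100 MB journal keeps loopback disk usage small.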

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname)
    if is_ubuntu; then
        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to show up, otherwise the actions below will fail
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # change pool replica size according to the CEPH_REPLICAS set by the user
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}

    # create a simple CRUSH rule that distributes across OSDs instead of hosts,
    # then apply it to the default pools
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking for a
        # file named 'upstart' or 'sysvinit'; thanks to these 'touches' we are able to
        # control the OSD daemons from the init script.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}
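
# After ``configure_ceph`` has run, overall cluster state can be inspected
# by hand, for example:
#
#   sudo ceph -c ${CEPH_CONF_FILE} -s
#   sudo ceph -c ${CEPH_CONF_FILE} osd tree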

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    iniset $GLANCE_API_CONF DEFAULT default_store rbd
    iniset $GLANCE_API_CONF DEFAULT rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF DEFAULT rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF DEFAULT rbd_store_pool $GLANCE_CEPH_POOL
    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
}
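
# The resulting Glance access can be verified manually with the new user and
# pool, e.g. (a sketch, not executed by ``stack.sh``):
#
#   sudo rbd -c ${CEPH_CONF_FILE} --id ${GLANCE_CEPH_USER} -p ${GLANCE_CEPH_POOL} ls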

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
}
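
# A quick way to confirm the Nova-side wiring is to list the pools and
# re-read one of the options written above, for example:
#
#   sudo ceph -c ${CEPH_CONF_FILE} osd lspools
#   iniget $NOVA_CONF libvirt images_rbd_pool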

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
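
# The Cinder user's capabilities can be double-checked against the pools
# created above, for example:
#
#   sudo ceph -c ${CEPH_CONF_FILE} auth get client.${CINDER_CEPH_USER}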

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
}

# install_ceph() - Collect source and prepare
function install_ceph {
    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
    #                leveraging the list in stack.sh
    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
        NO_UPDATE_REPOS=False
        install_package ceph
    else
        exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
    fi
}

# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
    else
        sudo service ceph start
    fi
}
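
# A simple check that the daemons actually came up (not run by ``stack.sh``):
#
#   pgrep -f ceph-mon
#   pgrep -f ceph-osd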

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}


# Restore xtrace
$XTRACE

## Local variables:
## mode: shell-script
## End: