blob: d5c916f084dce656a137d03e9f9971c0360f73af [file] [log] [blame]
Sébastien Han36f2f022014-01-06 18:09:26 +01001# lib/ceph
2# Functions to control the configuration and operation of the **Ceph** storage service
3
4# Dependencies:
5#
6# - ``functions`` file
7# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
8
9# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
10#
11# - install_ceph
12# - configure_ceph
13# - init_ceph
14# - start_ceph
15# - stop_ceph
16# - cleanup_ceph
17
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in
# kilobytes.
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}

# Common
# NOTE: a fresh fsid is generated every time this file is sourced; it is
# written into ceph.conf by configure_ceph for a brand-new cluster.
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
# configured for your Ceph cluster. By default we are configuring
# only one replica since this is way less CPU and memory intensive. If
# you are planning to test Ceph replication feel free to increase this value
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})

# Connect to an existing Ceph cluster
# Quote the current value so an empty/unset REMOTE_CEPH is passed to
# trueorfalse as a single (possibly empty) argument.
REMOTE_CEPH=$(trueorfalse False "${REMOTE_CEPH:-}")
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
74
75
Sébastien Han36f2f022014-01-06 18:09:26 +010076# Functions
77# ------------
78
# get_ceph_version() - Print the version string of the running monitor
# daemon on this host, read from its admin socket.
# Outputs: the bare version (e.g. "0.80.7") on stdout.
function get_ceph_version {
    local ceph_version_str
    # Split declaration from assignment so the command's exit status is
    # not masked by 'local'. The version is the 4th double-quote-delimited
    # field of the JSON reply from the mon admin socket.
    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4)
    echo "$ceph_version_str"
}
83
# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    # Write the secret definition; the UUID ties the libvirt secret to
    # the rbd_secret_uuid Nova is configured with.
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${CINDER_CEPH_UUID}</uuid>
  <usage type='ceph'>
    <name>client.${CINDER_CEPH_USER} secret</name>
  </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    # Quote the command substitution so the base64 key reaches virsh as a
    # single argument even if it ever contains IFS characters.
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 "$(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})"
    # The XML is only needed transiently and contains no key material,
    # but clean it up anyway.
    sudo rm -f secret.xml
}
99
# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
    if is_service_enabled cinder || is_service_enabled nova; then
        local virsh_uuid
        # secret-list may report several secrets; undefine each one
        # separately. (Previously all UUIDs were appended to a single
        # secret-undefine call, which only worked with exactly one secret.)
        for virsh_uuid in $(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }'); do
            sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
        done
    fi
}
107
108
# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
function check_os_support_ceph {
    # Distros on this list are known to ship at least the Firefly release;
    # nothing more to do for them.
    if [[ ${DISTRO} =~ (trusty|f20|f21) ]]; then
        return
    fi
    echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
    # The operator can opt in to an unsupported install explicitly.
    if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
        die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
    fi
    NO_UPDATE_REPOS=False
}
119
# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
# cleanup_ceph_remote - best-effort removal of the pools and client users
# this plugin created on an existing (remote) Ceph cluster. All errors are
# deliberately suppressed so cleanup proceeds as far as possible.
function cleanup_ceph_remote {
    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
    if is_service_enabled glance; then
        # 'osd pool delete' requires the pool name twice plus the
        # confirmation flag.
        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled cinder; then
        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled c-bak; then
        # NOTE(review): CINDER_BAK_CEPH_POOL / CINDER_BAK_CEPH_USER are not
        # defined in this file — presumably set by the caller or another
        # lib; verify, otherwise these expand empty and the commands no-op.
        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
    fi
    if is_service_enabled nova; then
        # Clear Nova's libvirt secret reference so it no longer points at
        # a key that is about to disappear.
        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
    fi
}
141
# cleanup_ceph_embedded() - Tear down the local DevStack-managed cluster:
# stop the daemons, wipe the data directories, unmount and drop the
# loop-back disk image.
function cleanup_ceph_embedded {
    # pkill exits non-zero when no process matched; ignore that, the same
    # way init_ceph does, so cleanup continues.
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    # 'grep -E' replaces the deprecated 'egrep' spelling.
    if grep -E -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
}
153
# cleanup_ceph_general() - Cleanup steps common to remote and embedded
# deployments: drop the libvirt secret, uninstall packages, purge config.
function cleanup_ceph_general {
    undefine_virsh_secret
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1

    # purge ceph config file and keys
    # ':?' aborts if CEPH_CONF_DIR is ever unset/empty instead of letting
    # this expand to 'rm -rf /*'.
    sudo rm -rf ${CEPH_CONF_DIR:?}/*
}
161
162
# configure_ceph() - Set config files, create data dirs, etc
# Bootstraps a single-host cluster: backing disk, monitor key/dir,
# ceph.conf, monitor daemon, replica sizing and one OSD per replica.
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    # NOTE(review): these paths hardcode /var/lib/ceph rather than using
    # ${CEPH_DATA_DIR}; they only agree while CEPH_DATA_DIR keeps its
    # default — confirm before changing CEPH_DATA_DIR.
    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
    # The 'upstart'/'sysvinit' marker file tells Ceph's init scripts which
    # init system owns this daemon.
    if is_ubuntu; then
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to come up otherwise we will not be able to do the actions below
    # (3 attempts x 5s; the monitor writes the admin keyring shortly after start)
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # pools data and metadata were removed in the Giant release so depending on the version we apply different commands
    local ceph_version=$(get_ceph_version)
    # change pool replica size according to the CEPH_REPLICAS set by the user
    # version < 0.87 (pre-Giant) still has the default data/metadata pools
    if [[ ${ceph_version%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
    else
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    fi

    # create a simple rule to take OSDs instead of host with CRUSH
    # then apply this rules to the default pool
    # NOTE: RULE_ID is intentionally global; the configure_ceph_embedded_*
    # functions reuse it for the service pools.
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s) - one per requested replica
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd ' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # ceph's init script is parsing ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ and looking for a file
        # 'upstart' or 'sysinitv', thanks to these 'touches' we are able to control OSDs daemons
        # from the init script.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}
250
# configure_ceph_embedded_glance() - Size the Glance pool's replication and,
# for multi-replica setups, attach the DevStack CRUSH rule.
function configure_ceph_embedded_glance {
    # Match the pool's replication factor to CEPH_REPLICAS.
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    # Only replicated deployments use the OSD-level rule built in configure_ceph.
    [[ $CEPH_REPLICAS -eq 1 ]] || sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
}
258
# configure_ceph_glance() - Glance config needs to come after Glance is set up
# Creates the images pool and client user, installs the keyring, and points
# Glance's RBD store at them.
function configure_ceph_glance {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    # Fix: '$whoami' was an undefined variable; '$(whoami)' is the intended
    # command substitution for the current user (group resolved via id -g -n).
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    # NOTE(eharney): When Glance has fully migrated to Glance store,
    # default_store can be removed from [DEFAULT]. (See lib/glance.)
    iniset $GLANCE_API_CONF DEFAULT default_store rbd
    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
    iniset $GLANCE_API_CONF glance_store default_store rbd
    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
275
# configure_ceph_embedded_nova() - Size the Nova ephemeral pool's replication
# and, for multi-replica setups, attach the DevStack CRUSH rule.
function configure_ceph_embedded_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        # Fix: '-c' was attached to sudo ('sudo -c ${CEPH_CONF_FILE} ceph ...'),
        # which is not a valid sudo option; it belongs to the ceph command.
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}
283
# configure_ceph_nova() - Nova config needs to come after Nova is set up
# Creates the vms pool and points libvirt/Nova at RBD-backed ephemeral disks.
function configure_ceph_nova {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    # -2 disables file injection entirely (no partition probing)
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_service_enabled cinder; then
        # With Cinder disabled nobody else creates the shared client key,
        # so create it here (redirected so the key is not echoed to logs).
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
        # Fix: '$whoami' was an undefined variable; use the whoami command.
        sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}
301
# configure_ceph_embedded_cinder() - Size the Cinder volume pool's replication
# and, for multi-replica setups, attach the DevStack CRUSH rule.
function configure_ceph_embedded_cinder {
    # Match the pool's replication factor to CEPH_REPLICAS.
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    # Only replicated deployments use the OSD-level rule built in configure_ceph.
    [[ $CEPH_REPLICAS -eq 1 ]] || sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
}
309
# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
# Creates the volumes pool and the shared cinder client user/keyring.
function configure_ceph_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    # Redirect tee's stdout so the generated key is not echoed to the
    # console/logs (matches the configure_ceph_nova path).
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
    # Fix: '$whoami' was an undefined variable; use the whoami command.
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
316
# init_ceph() - Initialize databases, etc.
function init_ceph {
    # Clean up from previous (possibly aborted) runs: make sure no Ceph
    # daemon is still alive before we start fresh. pkill's non-zero exit
    # (no match) is deliberately ignored.
    local daemon
    for daemon in ceph-mon ceph-osd; do
        sudo pkill -f ${daemon} || true
    done
}
324
# install_ceph() - Collect source and prepare
# install_ceph_remote - when attaching to an existing (remote) cluster only
# the client tools are needed, not the server daemons.
function install_ceph_remote {
    install_package ceph-common
}
329
Sébastien Han36f2f022014-01-06 18:09:26 +0100330function install_ceph {
Sébastien Han4eb04a52014-12-04 16:22:41 +0100331 install_package ceph
Sébastien Han36f2f022014-01-06 18:09:26 +0100332}
333
# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        # Upstart: signal the monitor job, then start each OSD the cluster
        # map knows about.
        sudo initctl emit ceph-mon id=$(hostname)
        local osd_id
        for osd_id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${osd_id}
        done
    else
        # sysvinit: a single service script drives every daemon.
        sudo service ceph start
    fi
}
345
# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    # Non-Ubuntu platforms use the single sysvinit service script.
    if ! is_ubuntu; then
        sudo service ceph stop > /dev/null 2>&1
        return
    fi
    # Ubuntu's upstart packaging splits mon and osd into *-all jobs.
    sudo service ceph-mon-all stop > /dev/null 2>&1
    sudo service ceph-osd-all stop > /dev/null 2>&1
}
355
356
357# Restore xtrace
358$XTRACE
359
360## Local variables:
361## mode: shell-script
362## End: