# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph

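# A minimal ``localrc`` sketch to exercise these entry points; the values
# shown are illustrative, only ``enable_service ceph`` is required:
#
#   enable_service ceph
#   CEPH_LOOPBACK_DISK_SIZE=8G
#   CEPH_REPLICAS=2
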
# Save trace setting
XTRACE=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size,
# with a unit suffix (e.g. ``4G``).
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
# configured for your Ceph cluster. By default we configure only one
# replica, since that is far less CPU and memory intensive. If you plan
# to test Ceph replication, feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
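# e.g. CEPH_REPLICAS=3 yields CEPH_REPLICAS_SEQ="1 2 3", and configure_ceph()
# below creates one loopback-backed OSD per element of that sequence.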

# Functions
# ---------

function get_ceph_version {
    local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4)
    echo $ceph_version_str
}
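
# Example: get_ceph_version echoes the monitor's bare version string,
# e.g. "0.80.7" on Firefly or "0.87" on Giant; configure_ceph() below
# splits it into major/minor parts to decide which default pools exist.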

# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${CINDER_CEPH_UUID}</uuid>
  <usage type='ceph'>
    <name>client.${CINDER_CEPH_USER} secret</name>
  </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
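
# To inspect the secret by hand after a run (not executed by stack.sh):
#   sudo virsh secret-list
#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}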

# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
function cleanup_ceph {
    sudo pkill -f ceph-mon
    sudo pkill -f ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    sudo rm -rf ${CEPH_CONF_DIR}/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi
    uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
    VIRSH_UUID=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
    sudo virsh secret-undefine ${VIRSH_UUID} >/dev/null 2>&1
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *'
    sudo mkdir ${CEPH_DATA_DIR}/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF
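
    # NOTE: "osd crush chooseleaf type = 0" tells CRUSH to spread replicas
    # across OSDs rather than hosts, which a single-node DevStack cluster
    # needs to ever reach active+clean; the 100 MB journal is only suited
    # to this loopback toy setup.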

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring ${CEPH_DATA_DIR}/tmp/keyring.mon.$(hostname)
    if is_ubuntu; then
        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch ${CEPH_DATA_DIR}/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to show up, otherwise we will not be able to run the commands below
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # the data and metadata pools were removed in the Giant release (0.87),
    # so apply the per-pool commands only on older versions
    local ceph_version=$(get_ceph_version)
    local ceph_major=$(echo ${ceph_version} | cut -d '.' -f 1)
    local ceph_minor=$(echo ${ceph_version} | cut -d '.' -f 2)
    # change pool replica size according to the CEPH_REPLICAS set by the user
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    if [[ ${ceph_major} -eq 0 ]] && [[ ${ceph_minor} -lt 87 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
    fi

    # create a simple CRUSH rule that distributes replicas across OSDs
    # rather than hosts, then apply it to the default pool(s)
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        # the data and metadata pools only exist before Giant
        if [[ ${ceph_major} -eq 0 ]] && [[ ${ceph_minor} -lt 87 ]]; then
            sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
            sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
        fi
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking
        # for a file named 'upstart' or 'sysvinit'; these touches let us control
        # the OSD daemons from the init script.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}
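
# After configure_ceph() the cluster can be checked by hand, e.g.:
#   sudo ceph -c ${CEPH_CONF_FILE} -s
# should show the monitor up and ${CEPH_REPLICAS} OSD(s) in the osdmap.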

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    # NOTE(eharney): When Glance has fully migrated to Glance store,
    # default_store can be removed from [DEFAULT]. (See lib/glance.)
    iniset $GLANCE_API_CONF DEFAULT default_store rbd
    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
    iniset $GLANCE_API_CONF glance_store default_store rbd
    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
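
# A hand-run smoke test for the Glance integration (not executed by stack.sh):
#   rbd -p ${GLANCE_CEPH_POOL} --id ${GLANCE_CEPH_USER} ls
# lists one RBD image per image uploaded to Glance.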

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_service_enabled cinder; then
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
        sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}
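
# Nova deliberately reuses the Cinder cephx user (${CINDER_CEPH_USER}) and
# its libvirt secret (${CINDER_CEPH_UUID}), so booting from RBD and
# attaching RBD volumes work with a single identity.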

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
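
# Hand-run check once Cinder is up (the volume name is an example):
#   cinder create --display-name test 1
#   sudo rbd -p ${CINDER_CEPH_POOL} ls
# the new volume should appear in the pool as volume-<uuid>.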

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
}

# install_ceph() - Collect source and prepare
function install_ceph {
    # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
    # leveraging the list in stack.sh
    if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
        NO_UPDATE_REPOS=False
        install_package ceph
    else
        exit_distro_not_supported "Ceph requires at least the Firefly release, which your distro does not provide. Please use Ubuntu Trusty or Fedora 19/20"
    fi
}

# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
    else
        sudo service ceph start
    fi
}

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}


# Restore xtrace
$XTRACE

## Local variables:
## mode: shell-script
## End: