#!/bin/bash
#
# lib/ceph
# Functions to control the configuration and operation of the **Ceph** storage service

# Dependencies:
#
# - ``functions`` file
# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined

# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
#
# - install_ceph
# - configure_ceph
# - init_ceph
# - start_ceph
# - stop_ceph
# - cleanup_ceph
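
# A hypothetical ``local.conf`` sketch for exercising these entry points
# (assuming the standard ``extras.d`` hook is in place; the values shown
# are illustrative only):
#
#   [[local|localrc]]
#   enable_service ceph
#   CEPH_LOOPBACK_DISK_SIZE=8G
#   CEPH_REPLICAS=1
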
# Save trace setting
_XTRACE_LIB_CEPH=$(set +o | grep xtrace)
set +o xtrace


# Defaults
# --------

# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
# Default is the common DevStack data directory.
CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img

# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
# Default is ``/etc/ceph``.
CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}

# DevStack will create a loop-back disk formatted as XFS to store the
# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the desired disk size,
# e.g. ``4G``.
# Default is 4 gigabytes.
CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}

# Common
CEPH_FSID=$(uuidgen)
CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf

# Glance
GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}

# Nova
NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}

# Cinder
CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}

# Set ``CEPH_REPLICAS`` to the number of replicas to configure for your
# Ceph cluster. By default we configure only one replica, since that is
# far less CPU- and memory-intensive. If you plan to test Ceph
# replication, feel free to increase this value.
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
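# For example, ``CEPH_REPLICAS=3`` makes ``CEPH_REPLICAS_SEQ`` expand to
# "1 2 3", and configure_ceph() below creates one OSD per element of
# that sequence.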

# Connect to an existing Ceph cluster
REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}

# Cinder encrypted volume tests are not supported with a Ceph backend due to
# bug 1463525.
ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False

# Functions
# ---------

function get_ceph_version {
    local ceph_version_str
    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
    echo $ceph_version_str
}
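# As a sketch of what the pipeline above does: the monitor admin socket
# returns JSON like {"version":"0.80.7"}, which the two ``cut`` calls
# reduce to a "major.minor" string such as "0.80".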

# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
# so it can connect to the Ceph cluster while attaching a Cinder block device
function import_libvirt_secret_ceph {
    cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${CINDER_CEPH_UUID}</uuid>
  <usage type='ceph'>
    <name>client.${CINDER_CEPH_USER} secret</name>
  </usage>
</secret>
EOF
    sudo virsh secret-define --file secret.xml
    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
    sudo rm -f secret.xml
}
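# To check the imported secret by hand (illustrative commands only):
#   sudo virsh secret-list
#   sudo virsh secret-get-value ${CINDER_CEPH_UUID}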

# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
function undefine_virsh_secret {
    if is_service_enabled cinder || is_service_enabled nova; then
        local virsh_uuid
        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
    fi
}


# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
function check_os_support_ceph {
    if [[ ! ${DISTRO} =~ (trusty|f21|f22|f23) ]]; then
        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 21 (and higher)"
        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
            die $LINENO "If you wish to install Ceph on this distribution anyway, run with FORCE_CEPH_INSTALL=yes"
        fi
        NO_UPDATE_REPOS=False
    fi
}
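
# For example, to bypass the check on an unsupported distro (at your own
# risk):
#   FORCE_CEPH_INSTALL=yes ./stack.sh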
127
Sébastien Han36f2f022014-01-06 18:09:26 +0100128# cleanup_ceph() - Remove residual data files, anything left over from previous
129# runs that a clean run would need to clean up
Sébastien Han4eb04a52014-12-04 16:22:41 +0100130function cleanup_ceph_remote {
131 # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
132 if is_service_enabled glance; then
133 sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
134 sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
135 fi
136 if is_service_enabled cinder; then
137 sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
138 sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
139 fi
140 if is_service_enabled c-bak; then
141 sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
142 sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
143 fi
144 if is_service_enabled nova; then
145 iniset $NOVA_CONF libvirt rbd_secret_uuid ""
146 sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
147 fi
148}

function cleanup_ceph_embedded {
    sudo killall -w -9 ceph-mon
    sudo killall -w -9 ceph-osd
    sudo rm -rf ${CEPH_DATA_DIR}/*/*
    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
        sudo umount ${CEPH_DATA_DIR}
    fi
    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
        sudo rm -f ${CEPH_DISK_IMAGE}
    fi

    # purge ceph config file and keys
    sudo rm -rf ${CEPH_CONF_DIR}/*
}

function cleanup_ceph_general {
    undefine_virsh_secret
}

# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
    local count=0

    # create a backing file disk
    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}

    # populate ceph directory
    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}

    # create ceph monitor initial key and directory
    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \
        --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
        --cap mon 'allow *'
    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)

    # create a default ceph configuration file
    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
mon_host = ${SERVICE_HOST}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd crush chooseleaf type = 0
osd journal size = 100
EOF
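
    # Notes on the settings above: ``osd crush chooseleaf type = 0`` tells
    # CRUSH to separate replicas across OSDs rather than hosts (everything
    # runs on one node here), and ``osd journal size`` is expressed in MB,
    # so each OSD journal stays at a small 100 MB for this test setup.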

    # bootstrap the ceph monitor
    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
        --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)

    if is_ubuntu; then
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
        sudo initctl emit ceph-mon id=$(hostname)
    else
        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
        sudo service ceph start mon.$(hostname)
    fi

    # wait for the admin key to appear; otherwise we will not be able to
    # perform the actions below
    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
        echo_summary "Waiting for the Ceph admin key to be ready..."

        count=$(($count + 1))
        if [ $count -eq 3 ]; then
            die $LINENO "Maximum of 3 retries reached"
        fi
        sleep 5
    done

    # the data and metadata pools were removed in the Giant release, so we
    # apply different commands depending on the Ceph version
    local ceph_version
    ceph_version=$(get_ceph_version)
    # change pool replica size according to the CEPH_REPLICAS set by the user
    if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
    else
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
    fi

    # create a simple CRUSH rule that places replicas on OSDs instead of
    # hosts, then apply the rule to the default pools
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
    fi

    # create the OSD(s)
    for rep in ${CEPH_REPLICAS_SEQ}; do
        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
            mon 'allow profile osd' osd 'allow *' | \
            sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring

        # ceph's init script parses ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ looking
        # for a file named 'upstart' or 'sysvinit'; thanks to these 'touches' we
        # are able to control the OSD daemons from the init script.
        if is_ubuntu; then
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
        else
            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
        fi
    done
}
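
# A quick sanity check after configure_ceph() has run (illustrative
# commands; the monitor must be up):
#   sudo ceph -c ${CEPH_CONF_FILE} -s         # overall cluster status
#   sudo ceph -c ${CEPH_CONF_FILE} osd tree   # one OSD per replica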

function configure_ceph_embedded_glance {
    # configure Glance service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_glance() - Glance config needs to come after Glance is set up
function configure_ceph_glance {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring

    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
    iniset $GLANCE_API_CONF glance_store default_store rbd
    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
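
# With default settings, the iniset calls above yield a glance-api.conf
# fragment roughly like this (values vary with your configuration):
#
#   [DEFAULT]
#   show_image_direct_url = True
#
#   [glance_store]
#   default_store = rbd
#   stores = file, http, rbd
#   rbd_store_ceph_conf = /etc/ceph/ceph.conf
#   rbd_store_user = glance
#   rbd_store_pool = images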

function configure_ceph_embedded_nova {
    # configure Nova service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_nova() - Nova config needs to come after Nova is set up
function configure_ceph_nova {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
    iniset $NOVA_CONF libvirt inject_key false
    iniset $NOVA_CONF libvirt inject_partition -2
    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
    iniset $NOVA_CONF libvirt images_type rbd
    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}

    if ! is_service_enabled cinder; then
        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
            mon "allow r" \
            osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rwx pool=${GLANCE_CEPH_POOL}" | \
            sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
        sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    fi
}
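
# The resulting nova.conf [libvirt] section looks roughly like this
# (the secret UUID is generated per run):
#
#   [libvirt]
#   images_type = rbd
#   images_rbd_pool = vms
#   images_rbd_ceph_conf = /etc/ceph/ceph.conf
#   rbd_user = cinder
#   rbd_secret_uuid = <CINDER_CEPH_UUID>
#   disk_cachemodes = network=writeback
#   inject_key = false
#   inject_partition = -2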

function configure_ceph_embedded_cinder {
    # Configure Cinder service options, ceph pool, ceph user and ceph key
    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
    if [[ $CEPH_REPLICAS -ne 1 ]]; then
        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
    fi
}

# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
function configure_ceph_cinder {
    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
        mon "allow r" \
        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL}, allow rwx pool=${GLANCE_CEPH_POOL}" | \
        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
    sudo chown ${STACK_USER}:$(id -g -n $(whoami)) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
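
# Note: the client.${CINDER_CEPH_USER} caps span the Cinder, Nova and
# Glance pools so that volumes can be created as copy-on-write clones of
# Glance images and attached to Nova instances using the same key.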

# init_ceph() - Initialize databases, etc.
function init_ceph {
    # clean up from previous (possibly aborted) runs
    # make sure to kill all ceph processes first
    sudo pkill -f ceph-mon || true
    sudo pkill -f ceph-osd || true
}

# install_ceph() - Collect source and prepare
function install_ceph_remote {
    install_package ceph-common
}

function install_ceph {
    install_package ceph
}

# start_ceph() - Start running processes, including screen
function start_ceph {
    if is_ubuntu; then
        sudo initctl emit ceph-mon id=$(hostname)
        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
            sudo start ceph-osd id=${id}
        done
    else
        sudo service ceph start
    fi
}

# stop_ceph() - Stop running processes (non-screen)
function stop_ceph {
    if is_ubuntu; then
        sudo service ceph-mon-all stop > /dev/null 2>&1
        sudo service ceph-osd-all stop > /dev/null 2>&1
    else
        sudo service ceph stop > /dev/null 2>&1
    fi
}


# Restore xtrace
$_XTRACE_LIB_CEPH

## Local variables:
## mode: shell-script
## End: