Merge "Allow specification of ironic callback timeout"
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 485cd0f..d1f7377 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -92,6 +92,45 @@
- **clean** - Called by ``clean.sh`` before other services are cleaned,
but after ``unstack.sh`` has been called.
+
+Externally Hosted Plugins
+=========================
+
+Based on the extras.d hooks, DevStack supports a standard mechanism
+for including plugins from external repositories. The plugin interface
+assumes the following:
+
+An external git repository that includes a ``devstack/`` top-level
+directory. Inside this directory there can be two files.
+
+- ``settings`` - a file containing global variables that will be
+ sourced very early in the process. This is helpful if other plugins
+ might depend on this one, and need access to global variables to do
+ their work.
+- ``plugin.sh`` - the actual plugin. It will be executed by DevStack
+  during its run. Plugins are run in their registration order,
+  immediately after all in-tree extras.d dispatch at the phase in
+  question. ``plugin.sh`` looks like the extras.d dispatcher above,
+  **except** that it should not include the is_service_enabled
+  conditional; all external plugins are always assumed to be
+  enabled.
+
+Plugins are registered by adding the following to the localrc section
+of ``local.conf``.
+
+They are added in the following format::
+
+ enable_plugin <NAME> <GITURL> [GITREF]
+
+- ``name`` - an arbitrary name (e.g. glusterfs, docker, zaqar, congress)
+- ``giturl`` - a valid git url that can be cloned
+- ``gitref`` - an optional git ref (branch / ref / tag) that will be
+ cloned. Defaults to master.
+
+An example would be as follows::
+
+ enable_plugin glusterfs https://github.com/sdague/devstack-plugins glusterfs
+
Hypervisor
==========
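
For reference, a minimal sketch of what a plugin's ``devstack/plugin.sh``
could look like, following the dispatch interface described above. The
``foo`` service name, its ``FOO_*`` variables, and the ``stop_process``
call are illustrative only::

    # devstack/plugin.sh - invoked by DevStack as "plugin.sh <mode> [phase]",
    # e.g. "stack install", "stack post-config", "unstack", "clean".
    # Note: no is_service_enabled guard - external plugins are always enabled.
    if [[ "$1" == "stack" && "$2" == "install" ]]; then
        echo_summary "Installing foo"
        setup_develop $DEST/foo
    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
        echo_summary "Configuring foo"
        iniset $FOO_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
        echo_summary "Starting foo"
        run_process foo "$FOO_BIN_DIR/foo-api"
    fi

    if [[ "$1" == "unstack" ]]; then
        stop_process foo
    fi
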
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index 2757038..d066e06 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -94,7 +94,7 @@
# run tempest api/volume/test_*
log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh volume)...", True
-./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE
+./tools/pretty_tox.sh volume 2>&1 | tee -a $TEMPFILE
if [[ $? = 0 ]]; then
log_message "CONGRATULATIONS!!! Device driver PASSED!", True
log_message "Submit output: ($TEMPFILE)"
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
index 50bdfae..38b901b 100644
--- a/extras.d/60-ceph.sh
+++ b/extras.d/60-ceph.sh
@@ -6,14 +6,19 @@
source $TOP_DIR/lib/ceph
elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
echo_summary "Installing Ceph"
- install_ceph
- echo_summary "Configuring Ceph"
- configure_ceph
- # NOTE (leseb): Do everything here because we need to have Ceph started before the main
- # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
- echo_summary "Initializing Ceph"
- init_ceph
- start_ceph
+ check_os_support_ceph
+ if [ "$REMOTE_CEPH" = "False" ]; then
+ install_ceph
+ echo_summary "Configuring Ceph"
+ configure_ceph
+ # NOTE (leseb): Do everything here because we need to have Ceph started before the main
+ # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
+ echo_summary "Initializing Ceph"
+ init_ceph
+ start_ceph
+ else
+ install_ceph_remote
+ fi
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
if is_service_enabled glance; then
echo_summary "Configuring Glance for Ceph"
@@ -32,14 +37,39 @@
echo_summary "Configuring libvirt secret"
import_libvirt_secret_ceph
fi
+
+ if [ "$REMOTE_CEPH" = "False" ]; then
+ if is_service_enabled glance; then
+ echo_summary "Configuring Glance for Ceph"
+ configure_ceph_embedded_glance
+ fi
+ if is_service_enabled nova; then
+ echo_summary "Configuring Nova for Ceph"
+ configure_ceph_embedded_nova
+ fi
+ if is_service_enabled cinder; then
+ echo_summary "Configuring Cinder for Ceph"
+ configure_ceph_embedded_cinder
+ fi
+ fi
fi
if [[ "$1" == "unstack" ]]; then
- stop_ceph
- cleanup_ceph
+ if [ "$REMOTE_CEPH" = "True" ]; then
+ cleanup_ceph_remote
+ else
+ cleanup_ceph_embedded
+ stop_ceph
+ fi
+ cleanup_ceph_general
fi
if [[ "$1" == "clean" ]]; then
- cleanup_ceph
+ if [ "$REMOTE_CEPH" = "True" ]; then
+ cleanup_ceph_remote
+ else
+ cleanup_ceph_embedded
+ fi
+ cleanup_ceph_general
fi
fi
diff --git a/files/debs/neutron b/files/debs/neutron
index a48a800..5a59b22 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -1,3 +1,4 @@
+acl # testonly
ebtables
iptables
iputils-ping
@@ -24,3 +25,4 @@
sqlite3
vlan
radvd # NOPRIME
+uuid-runtime
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index 8431bd1..50ee145 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -1,3 +1,4 @@
+acl # testonly
dnsmasq
dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
ebtables
diff --git a/files/rpms/neutron b/files/rpms/neutron
index f2473fb..5450408 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -1,4 +1,5 @@
MySQL-python
+acl # testonly
dnsmasq # for q-dhcp
dnsmasq-utils # for dhcp_release
ebtables
diff --git a/functions b/functions
index c7a3b9d..12be160 100644
--- a/functions
+++ b/functions
@@ -42,7 +42,7 @@
if [[ $image_url != file* ]]; then
# Downloads the image (uec ami+akistyle), then extracts it.
if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
- wget -c $image_url -O $FILES/$image_fname
+ wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
if [[ $? -ne 0 ]]; then
echo "Not found: $image_url"
return
@@ -116,7 +116,7 @@
if [[ $flat_url != file* ]]; then
if [[ ! -f $FILES/$flat_fname || \
"$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
- wget -c $flat_url -O $FILES/$flat_fname
+ wget --progress=dot:giga -c $flat_url -O $FILES/$flat_fname
fi
image="$FILES/${flat_fname}"
else
diff --git a/functions-common b/functions-common
index 40a0d2b..b0b2622 100644
--- a/functions-common
+++ b/functions-common
@@ -26,7 +26,6 @@
# - ``ERROR_ON_CLONE``
# - ``FILES``
# - ``OFFLINE``
-# - ``PIP_DOWNLOAD_CACHE``
# - ``RECLONE``
# - ``REQUIREMENTS_DIR``
# - ``STACK_USER``
@@ -44,7 +43,6 @@
declare -A GITBRANCH
declare -A GITDIR
-
# Config Functions
# ================
@@ -148,6 +146,21 @@
$xtrace
}
+function inidelete {
+ local xtrace=$(set +o | grep xtrace)
+ set +o xtrace
+ local file=$1
+ local section=$2
+ local option=$3
+
+ [[ -z $section || -z $option ]] && return
+
+ # Remove old values
+ sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+
+ $xtrace
+}
+
# Set an option in an INI file
# iniset config-file section option value
function iniset {
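
For illustration, the new ``inidelete`` helper removes every occurrence of
an option from one section and leaves the rest of the file untouched; it is
typically paired with a fresh ``iniset``. The file path below is invented::

    # drop all "password" lines from the [database] section, then set one value
    inidelete /etc/example/example.conf database password
    iniset /etc/example/example.conf database password "new-secret"
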
@@ -997,7 +1010,6 @@
fi
if [[ -z "$DISTRO" ]]; then
GetDistro
- echo "Found Distro $DISTRO"
fi
for service in ${services//,/ }; do
# Allow individual services to specify dependencies
@@ -1555,8 +1567,7 @@
}
# Wrapper for ``pip install`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``,
-# ``TRACK_DEPENDS``, ``*_proxy``
+# Uses globals ``OFFLINE``, ``TRACK_DEPENDS``, ``*_proxy``
# pip_install package [package ...]
function pip_install {
local xtrace=$(set +o | grep xtrace)
@@ -1581,8 +1592,15 @@
local sudo_pip="sudo -H"
fi
+ local pip_version=$(python -c "import pip; \
+        print(pip.__version__.split('.')[0])")
+ if (( pip_version<6 )); then
+ die $LINENO "Currently installed pip version ${pip_version} does not" \
+ "meet minimum requirements (>=6)."
+ fi
+
$xtrace
- $sudo_pip PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+ $sudo_pip \
http_proxy=$http_proxy \
https_proxy=$https_proxy \
no_proxy=$no_proxy \
@@ -1593,7 +1611,7 @@
if [[ "$INSTALL_TESTONLY_PACKAGES" == "True" ]]; then
local test_req="$@/test-requirements.txt"
if [[ -e "$test_req" ]]; then
- $sudo_pip PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
+ $sudo_pip \
http_proxy=$http_proxy \
https_proxy=$https_proxy \
no_proxy=$no_proxy \
@@ -1722,6 +1740,100 @@
fi
}
+# Plugin Functions
+# =================
+
+DEVSTACK_PLUGINS=${DEVSTACK_PLUGINS:-""}
+
+# enable_plugin <name> <url> [branch]
+#
+# ``name`` is an arbitrary name (e.g. glusterfs, nova-docker, zaqar)
+# ``url`` is a git url
+# ``branch`` is a gitref; if not set, it defaults to master
+function enable_plugin {
+ local name=$1
+ local url=$2
+ local branch=${3:-master}
+ DEVSTACK_PLUGINS+=",$name"
+ GITREPO[$name]=$url
+ GITDIR[$name]=$DEST/$name
+ GITBRANCH[$name]=$branch
+}
+
+# fetch_plugins
+#
+# clones all plugins
+function fetch_plugins {
+ local plugins="${DEVSTACK_PLUGINS}"
+ local plugin
+
+ # short circuit if nothing to do
+ if [[ -z $plugins ]]; then
+ return
+ fi
+
+ echo "Fetching devstack plugins"
+ for plugin in ${plugins//,/ }; do
+ git_clone_by_name $plugin
+ done
+}
+
+# load_plugin_settings
+#
+# Load settings from plugins in the order that they were registered
+function load_plugin_settings {
+ local plugins="${DEVSTACK_PLUGINS}"
+ local plugin
+
+ # short circuit if nothing to do
+ if [[ -z $plugins ]]; then
+ return
+ fi
+
+ echo "Loading plugin settings"
+ for plugin in ${plugins//,/ }; do
+ local dir=${GITDIR[$plugin]}
+ # source any known settings
+ if [[ -f $dir/devstack/settings ]]; then
+ source $dir/devstack/settings
+ fi
+ done
+}
+
+# run_plugins
+#
+# Run the devstack/plugin.sh in all the plugin directories. These are
+# run in registration order.
+function run_plugins {
+ local mode=$1
+ local phase=$2
+
+ local plugins="${DEVSTACK_PLUGINS}"
+ local plugin
+ for plugin in ${plugins//,/ }; do
+ local dir=${GITDIR[$plugin]}
+ if [[ -f $dir/devstack/plugin.sh ]]; then
+ source $dir/devstack/plugin.sh $mode $phase
+ fi
+ done
+}
+
+function run_phase {
+ local mode=$1
+ local phase=$2
+ if [[ -d $TOP_DIR/extras.d ]]; then
+ for i in $TOP_DIR/extras.d/*.sh; do
+ [[ -r $i ]] && source $i $mode $phase
+ done
+ fi
+ # the source phase corresponds to settings loading in plugins
+ if [[ "$mode" == "source" ]]; then
+ load_plugin_settings
+ else
+ run_plugins $mode $phase
+ fi
+}
+
# Service Functions
# =================
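
Taken together with the stack.sh changes below, the intended flow is roughly
the following; this is a sketch of the calls, not an exact transcript::

    # local.conf (localrc section) registers the plugin
    enable_plugin glusterfs https://github.com/sdague/devstack-plugins glusterfs

    # stack.sh then clones and dispatches the registered plugins
    fetch_plugins                  # git_clone_by_name for each plugin
    run_phase source               # loads each plugin's devstack/settings
    run_phase stack pre-install    # sources each plugin.sh with "stack pre-install"
    run_phase stack install        # ... and again for install, post-config, extra
    run_phase unstack              # called from unstack.sh at teardown
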
diff --git a/lib/ceph b/lib/ceph
index 3b62a91..77b5726 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -70,6 +70,11 @@
CEPH_REPLICAS=${CEPH_REPLICAS:-1}
CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
+# Connect to an existing Ceph cluster
+REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
+REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
+
+
# Functions
# ------------
@@ -94,29 +99,69 @@
sudo rm -f secret.xml
}
+# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
+function undefine_virsh_secret {
+ if is_service_enabled cinder || is_service_enabled nova; then
+ local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+ sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
+ fi
+}
+
+
+# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
+function check_os_support_ceph {
+ if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then
+ echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
+ if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
+ die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
+ fi
+ NO_UPDATE_REPOS=False
+ fi
+}
+
# cleanup_ceph() - Remove residual data files, anything left over from previous
# runs that a clean run would need to clean up
-function cleanup_ceph {
+function cleanup_ceph_remote {
+    # Do a proper cleanup from here to avoid leftovers on the remote Ceph cluster
+ if is_service_enabled glance; then
+ sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+ sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
+ fi
+ if is_service_enabled cinder; then
+ sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+ sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
+ fi
+ if is_service_enabled c-bak; then
+ sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+ sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
+ fi
+ if is_service_enabled nova; then
+ iniset $NOVA_CONF libvirt rbd_secret_uuid ""
+ sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
+ fi
+}
+
+function cleanup_ceph_embedded {
sudo pkill -f ceph-mon
sudo pkill -f ceph-osd
sudo rm -rf ${CEPH_DATA_DIR}/*/*
- sudo rm -rf ${CEPH_CONF_DIR}/*
if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
sudo umount ${CEPH_DATA_DIR}
fi
if [[ -e ${CEPH_DISK_IMAGE} ]]; then
sudo rm -f ${CEPH_DISK_IMAGE}
fi
- uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
- if is_service_enabled cinder || is_service_enabled nova; then
- local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
- sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
- fi
- if is_service_enabled nova; then
- iniset $NOVA_CONF libvirt rbd_secret_uuid ""
- fi
}
+function cleanup_ceph_general {
+ undefine_virsh_secret
+ uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
+
+ # purge ceph config file and keys
+ sudo rm -rf ${CEPH_CONF_DIR}/*
+}
+
+
# configure_ceph() - Set config files, create data dirs, etc
function configure_ceph {
local count=0
@@ -132,7 +177,7 @@
sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
# create a default ceph configuration file
- sudo tee -a ${CEPH_CONF_FILE} > /dev/null <<EOF
+ sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
[global]
fsid = ${CEPH_FSID}
mon_initial_members = $(hostname)
@@ -205,14 +250,17 @@
done
}
-# configure_ceph_glance() - Glance config needs to come after Glance is set up
-function configure_ceph_glance {
+function configure_ceph_embedded_glance {
# configure Glance service options, ceph pool, ceph user and ceph key
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
+}
+
+# configure_ceph_glance() - Glance config needs to come after Glance is set up
+function configure_ceph_glance {
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
@@ -227,14 +275,17 @@
iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
}
-# configure_ceph_nova() - Nova config needs to come after Nova is set up
-function configure_ceph_nova {
+function configure_ceph_embedded_nova {
# configure Nova service options, ceph pool, ceph user and ceph key
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
fi
+}
+
+# configure_ceph_nova() - Nova config needs to come after Nova is set up
+function configure_ceph_nova {
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
iniset $NOVA_CONF libvirt inject_key false
@@ -250,15 +301,17 @@
fi
}
-# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
-function configure_ceph_cinder {
+function configure_ceph_embedded_cinder {
# Configure Cinder service options, ceph pool, ceph user and ceph key
- sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
if [[ $CEPH_REPLICAS -ne 1 ]]; then
sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
-
fi
+}
+
+# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
+function configure_ceph_cinder {
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
}
@@ -272,15 +325,12 @@
}
# install_ceph() - Collect source and prepare
+function install_ceph_remote {
+ install_package ceph-common
+}
+
function install_ceph {
- # NOTE(dtroyer): At some point it'll be easier to test for unsupported distros,
- # leveraging the list in stack.sh
- if [[ ${os_CODENAME} =~ trusty ]] || [[ ${os_CODENAME} =~ Schrödinger’sCat ]] || [[ ${os_CODENAME} =~ Heisenbug ]]; then
- NO_UPDATE_REPOS=False
- install_package ceph
- else
- exit_distro_not_supported "Ceph since your distro doesn't provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 19/20"
- fi
+ install_package ceph
}
# start_ceph() - Start running processes, including screen
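
A hedged example of driving the new remote-cluster path from ``local.conf``;
the keyring path shown is just the default (assuming ``CEPH_CONF_DIR`` is
``/etc/ceph``) spelled out for illustration::

    [[local|localrc]]
    # Reuse an existing Ceph cluster; only ceph-common is installed locally
    REMOTE_CEPH=True
    REMOTE_CEPH_ADMIN_KEY_PATH=/etc/ceph/ceph.client.admin.keyring
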
diff --git a/lib/cinder b/lib/cinder
index c106424..177ddf0 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -110,6 +110,12 @@
done
fi
+# Change the default nova_catalog_info and nova_catalog_admin_info values in
+# cinder so that the service name cinder searches for matches the one set
+# for nova in keystone.
+CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL}
+CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL}
+
# Functions
# ---------
@@ -205,6 +211,8 @@
cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
+ rm -f $CINDER_CONF
+
configure_cinder_rootwrap
cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI
@@ -220,6 +228,9 @@
configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR
+ iniset $CINDER_CONF DEFAULT nova_catalog_info $CINDER_NOVA_CATALOG_INFO
+ iniset $CINDER_CONF DEFAULT nova_catalog_admin_info $CINDER_NOVA_CATALOG_ADMIN_INFO
+
iniset $CINDER_CONF DEFAULT auth_strategy keystone
iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $CINDER_CONF DEFAULT verbose True
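
With the defaults above, the net effect on ``cinder.conf`` is the following
(the values change only if the ``CINDER_NOVA_CATALOG_*`` variables are
overridden)::

    [DEFAULT]
    nova_catalog_info = compute:nova:publicURL
    nova_catalog_admin_info = compute:nova:adminURL
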
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 415ce94..7e9d2d3 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -54,11 +54,13 @@
iniset $CINDER_CONF DEFAULT glance_api_version 2
if is_service_enabled c-bak; then
- # Configure Cinder backup service options, ceph pool, ceph user and ceph key
sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
- if [[ $CEPH_REPLICAS -ne 1 ]]; then
- sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ if [ "$REMOTE_CEPH" = "False" ]; then
+ # Configure Cinder backup service options, ceph pool, ceph user and ceph key
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} size ${CEPH_REPLICAS}
+ if [[ $CEPH_REPLICAS -ne 1 ]]; then
+ sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+ fi
fi
sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
diff --git a/lib/config b/lib/config
index c0756bf..31c6fa6 100644
--- a/lib/config
+++ b/lib/config
@@ -144,6 +144,7 @@
else {
# For multiline, invoke the ini routines in the reverse order
count = cfg_attr_count[section, attr]
+ print "inidelete " configfile " " section " " attr
print "iniset " configfile " " section " " attr " \"" cfg_attr[section, attr, count - 1] "\""
for (l = count -2; l >= 0; l--)
print "iniadd_literal " configfile " " section " " attr " \"" cfg_attr[section, attr, l] "\""
diff --git a/lib/databases/mysql b/lib/databases/mysql
index d39d966..62c3d4c 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -23,22 +23,27 @@
if is_ubuntu; then
# Get ruthless with mysql
stop_service $MYSQL
- apt_get purge -y mysql*
+ uninstall_package mysql-common mariadb-common
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
return
elif is_fedora; then
if [[ $DISTRO =~ (rhel6) ]]; then
- MYSQL=mysqld
+ stop_service mysqld
+ uninstall_package mysql-server
+ sudo rm -rf /var/lib/mysql
else
- MYSQL=mariadb
+ stop_service mariadb
+ uninstall_package mariadb-server
+ sudo rm -rf /var/lib/mysql
fi
elif is_suse; then
- MYSQL=mysql
+ stop_service mysql
+ uninstall_package mysql-community-server
+ sudo rm -rf /var/lib/mysql
else
return
fi
- stop_service $MYSQL
}
function recreate_database_mysql {
diff --git a/lib/dstat b/lib/dstat
index 8f456a8..73ca279 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -26,7 +26,7 @@
# start_dstat() - Start running processes, including screen
function start_dstat {
# A better kind of sysstat, with the top process per time slice
- DSTAT_OPTS="-tcmndrylpg --top-cpu-adv"
+ DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv"
if [[ -n ${SCREEN_LOGDIR} ]]; then
screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
else
diff --git a/lib/heat b/lib/heat
index 49ed533..544ec45 100644
--- a/lib/heat
+++ b/lib/heat
@@ -38,6 +38,7 @@
HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE`
+HEAT_ENABLE_ADOPT_ABANDON=`trueorfalse False $HEAT_ENABLE_ADOPT_ABANDON`
HEAT_CONF_DIR=/etc/heat
HEAT_CONF=$HEAT_CONF_DIR/heat.conf
HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
@@ -73,7 +74,6 @@
# configure_heat() - Set config files, create data dirs, etc
function configure_heat {
- setup_develop $HEAT_DIR
if [[ "$HEAT_STANDALONE" = "True" ]]; then
setup_develop $HEAT_DIR/contrib/heat_keystoneclient_v2
fi
@@ -152,6 +152,11 @@
iniset $HEAT_CONF clients_cinder ca_file $SSL_BUNDLE_FILE
fi
+ if [[ "$HEAT_ENABLE_ADOPT_ABANDON" = "True" ]]; then
+ iniset $HEAT_CONF DEFAULT enable_stack_adopt true
+ iniset $HEAT_CONF DEFAULT enable_stack_abandon true
+ fi
+
# heat environment
sudo mkdir -p $HEAT_ENV_DIR
sudo chown $STACK_USER $HEAT_ENV_DIR
@@ -195,6 +200,7 @@
# install_heat() - Collect source and prepare
function install_heat {
git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
+ setup_develop $HEAT_DIR
}
# install_heat_other() - Collect source and prepare
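
The new ``HEAT_ENABLE_ADOPT_ABANDON`` flag is a ``local.conf`` toggle; when
set to ``True`` it results in the following ``heat.conf`` entries::

    [DEFAULT]
    enable_stack_adopt = true
    enable_stack_abandon = true
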
diff --git a/lib/ironic b/lib/ironic
index 6864142..c1140b5 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -542,7 +542,7 @@
-i ssh_address=$IRONIC_VM_SSH_ADDRESS \
-i ssh_port=$IRONIC_VM_SSH_PORT \
-i ssh_username=$IRONIC_SSH_USERNAME \
- -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME"
+ -i ssh_key_filename=$IRONIC_KEY_FILE"
else
local ironic_node_cpu=$IRONIC_HW_NODE_CPU
local ironic_node_ram=$IRONIC_HW_NODE_RAM
diff --git a/lib/keystone b/lib/keystone
index 071dc90..1599fa5 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -116,7 +116,7 @@
# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
function _cleanup_keystone_apache_wsgi {
- sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi
+ sudo rm -f $KEYSTONE_WSGI_DIR/*
sudo rm -f $(apache_site_config_for keystone)
}
diff --git a/lib/neutron b/lib/neutron
index 5678769..b4d0b8c 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -908,7 +908,7 @@
Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
if is_service_enabled q-vpn; then
- cp $NEUTRON_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
+ cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
fi
cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
@@ -1034,22 +1034,28 @@
fi
}
+# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
+function _neutron_deploy_rootwrap_filters {
+ local srcdir=$1
+ mkdir -p -m 755 $Q_CONF_ROOTWRAP_D
+ sudo cp -pr $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
+ sudo chown -R root:root $Q_CONF_ROOTWRAP_D
+ sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
+}
+
# _neutron_setup_rootwrap() - configure Neutron's rootwrap
function _neutron_setup_rootwrap {
if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
return
fi
- # Deploy new rootwrap filters files (owned by root).
# Wipe any existing ``rootwrap.d`` files first
Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
sudo rm -rf $Q_CONF_ROOTWRAP_D
fi
- # Deploy filters to ``$NEUTRON_CONF_DIR/rootwrap.d``
- mkdir -p -m 755 $Q_CONF_ROOTWRAP_D
- cp -pr $NEUTRON_DIR/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
- sudo chown -R root:root $Q_CONF_ROOTWRAP_D
- sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
+
+ _neutron_deploy_rootwrap_filters $NEUTRON_DIR
+
# Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
# location moved in newer versions, prefer new location
if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index a1c13ed..61a148e 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -13,7 +13,7 @@
function neutron_fwaas_configure_driver {
FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini
- cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME
+ cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME
iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True
iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver"
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index bd9dc87..f465cc9 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -17,6 +17,7 @@
function neutron_agent_lbaas_configure_common {
_neutron_service_plugin_class_add $LBAAS_PLUGIN
+ _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
}
function neutron_agent_lbaas_configure_agent {
@@ -25,7 +26,7 @@
LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
- cp $NEUTRON_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
+ cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
# ovs_use_veth needs to be set before the plugin configuration
# occurs to allow plugins to override the setting.
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 07f1f35..7e80b5b 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -16,6 +16,7 @@
function neutron_vpn_configure_common {
_neutron_service_plugin_class_add $VPN_PLUGIN
+ _neutron_deploy_rootwrap_filters $NEUTRON_VPNAAS_DIR
}
function neutron_vpn_stop {
diff --git a/lib/nova b/lib/nova
index cbfbdfa..5ac9ff1 100644
--- a/lib/nova
+++ b/lib/nova
@@ -769,8 +769,8 @@
}
function start_nova {
- start_nova_compute
start_nova_rest
+ start_nova_compute
}
function stop_nova_compute {
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 400204a..778d466 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -87,11 +87,20 @@
fi
elif is_service_enabled zeromq; then
if is_fedora; then
- uninstall_package zeromq python-zmq redis
+ uninstall_package zeromq python-zmq
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ uninstall_package redis python-redis
+ fi
elif is_ubuntu; then
- uninstall_package libzmq1 python-zmq redis-server
+ uninstall_package libzmq1 python-zmq
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ uninstall_package redis-server python-redis
+ fi
elif is_suse; then
- uninstall_package libzmq1 python-pyzmq redis
+ uninstall_package libzmq1 python-pyzmq
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ uninstall_package redis python-redis
+ fi
else
exit_distro_not_supported "zeromq installation"
fi
@@ -150,11 +159,20 @@
# but there is a matchmaker driver that works
# really well & out of the box for multi-node.
if is_fedora; then
- install_package zeromq python-zmq redis
+ install_package zeromq python-zmq
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ install_package redis python-redis
+ fi
elif is_ubuntu; then
- install_package libzmq1 python-zmq redis-server
+ install_package libzmq1 python-zmq
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ install_package redis-server python-redis
+ fi
elif is_suse; then
- install_package libzmq1 python-pyzmq redis
+ install_package libzmq1 python-pyzmq
+ if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then
+ install_package redis python-redis
+ fi
else
exit_distro_not_supported "zeromq installation"
fi
@@ -221,9 +239,9 @@
local file=$2
local section=$3
if is_service_enabled zeromq; then
- iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq
+ iniset $file $section rpc_backend "zmq"
iniset $file $section rpc_zmq_matchmaker \
- ${package}.openstack.common.rpc.matchmaker_redis.MatchMakerRedis
+ oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis
# Set MATCHMAKER_REDIS_HOST if running multi-node.
MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
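
Assuming the section passed in is ``DEFAULT`` and ``MATCHMAKER_REDIS_HOST``
is left at its default, the zeromq branch now writes config along these
lines::

    [DEFAULT]
    rpc_backend = zmq
    rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis

    [matchmaker_redis]
    host = 127.0.0.1
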
diff --git a/lib/tempest b/lib/tempest
index 7cac6dd..d31119b 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -464,6 +464,13 @@
iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
fi
+ # Libvirt-LXC
+ if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
+ iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
+ iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+ iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
+ fi
+
# service_available
for service in ${TEMPEST_SERVICES//,/ }; do
if is_service_enabled $service ; then
diff --git a/lib/trove b/lib/trove
index d889b05..abf4e87 100644
--- a/lib/trove
+++ b/lib/trove
@@ -123,11 +123,8 @@
sudo chown -R $STACK_USER: ${TROVE_CONF_DIR}
sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR}
- # Copy api-paste file over to the trove conf dir and configure it
+ # Copy api-paste file over to the trove conf dir
cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini
- TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
-
- configure_auth_token_middleware $TROVE_API_PASTE_INI trove $TROVE_AUTH_CACHE_DIR filter:authtoken
# (Re)create trove conf files
rm -f $TROVE_CONF_DIR/trove.conf
@@ -141,6 +138,7 @@
setup_trove_logging $TROVE_CONF_DIR/trove.conf
iniset $TROVE_CONF_DIR/trove.conf DEFAULT trove_api_workers "$API_WORKERS"
+ configure_auth_token_middleware $TROVE_CONF_DIR/trove.conf trove $TROVE_AUTH_CACHE_DIR
# (Re)create trove taskmanager conf file if needed
if is_service_enabled tr-tmgr; then
diff --git a/stack.sh b/stack.sh
index d6699f5..048e5d1 100755
--- a/stack.sh
+++ b/stack.sh
@@ -143,7 +143,7 @@
# Warn users who aren't on an explicitly supported distro, but allow them to
# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|f21|rhel6|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f20|f21|rhel6|rhel7) ]]; then
echo "WARNING: this script has not been tested on $DISTRO"
if [[ "$FORCE" != "yes" ]]; then
die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -570,15 +570,14 @@
source $TOP_DIR/lib/ldap
source $TOP_DIR/lib/dstat
+# Clone all external plugins
+fetch_plugins
+
# Extras Source
# --------------
# Phase: source
-if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i source
- done
-fi
+run_phase source
# Interactive Configuration
# -------------------------
@@ -712,31 +711,6 @@
PYPI_ALTERNATIVE_URL=$PYPI_ALTERNATIVE_URL $TOP_DIR/tools/install_pip.sh
fi
-# Do the ugly hacks for broken packages and distros
-source $TOP_DIR/tools/fixup_stuff.sh
-
-
-# Extras Pre-install
-# ------------------
-
-# Phase: pre-install
-if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i stack pre-install
- done
-fi
-
-
-install_rpc_backend
-
-if is_service_enabled $DATABASE_BACKENDS; then
- install_database
-fi
-
-if is_service_enabled neutron; then
- install_neutron_agent_packages
-fi
-
TRACK_DEPENDS=${TRACK_DEPENDS:-False}
# Install python packages into a virtualenv so that we can track them
@@ -750,6 +724,26 @@
$DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
fi
+# Do the ugly hacks for broken packages and distros
+source $TOP_DIR/tools/fixup_stuff.sh
+
+
+# Extras Pre-install
+# ------------------
+
+# Phase: pre-install
+run_phase stack pre-install
+
+install_rpc_backend
+
+if is_service_enabled $DATABASE_BACKENDS; then
+ install_database
+fi
+
+if is_service_enabled neutron; then
+ install_neutron_agent_packages
+fi
+
# Check Out and Install Source
# ----------------------------
@@ -871,11 +865,7 @@
# --------------
# Phase: install
-if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i stack install
- done
-fi
+run_phase stack install
if [[ $TRACK_DEPENDS = True ]]; then
$DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
@@ -1040,6 +1030,14 @@
fi
+# ZeroMQ
+# ------
+if is_service_enabled zeromq; then
+ echo_summary "Starting zeromq receiver"
+ run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
+fi
+
+
# Horizon
# -------
@@ -1148,11 +1146,7 @@
# ====================
# Phase: post-config
-if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i stack post-config
- done
-fi
+run_phase stack post-config
# Local Configuration
@@ -1222,11 +1216,6 @@
iniset $NOVA_CONF keymgr fixed_key $(generate_hex_string 32)
fi
-if is_service_enabled zeromq; then
- echo_summary "Starting zermomq receiver"
- run_process zeromq "$OSLO_BIN_DIR/oslo-messaging-zmq-receiver"
-fi
-
# Launch the nova-api and wait for it to answer before continuing
if is_service_enabled n-api; then
echo_summary "Starting Nova API"
@@ -1334,11 +1323,7 @@
# ==========
# Phase: extra
-if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i stack extra
- done
-fi
+run_phase stack extra
# Local Configuration
# ===================
diff --git a/stackrc b/stackrc
index be27dce..355c0dc 100644
--- a/stackrc
+++ b/stackrc
@@ -33,7 +33,8 @@
# For example, to enable Swift add this to ``local.conf``:
# enable_service s-proxy s-object s-container s-account
# In order to enable Neutron (a single node setup) add the following
-# settings in `` localrc``:
+# settings in ``local.conf``:
+# [[local|localrc]]
# disable_service n-net
# enable_service q-svc
# enable_service q-agt
@@ -529,8 +530,8 @@
CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
-# which may be set in ``localrc``. Also allow ``DEFAULT_IMAGE_NAME`` and
-# ``IMAGE_URLS`` to be set directly in ``localrc``.
+# which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and
+# ``IMAGE_URLS`` to be set in the ``localrc`` section of ``local.conf``.
case "$VIRT_DRIVER" in
openvz)
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
diff --git a/tests/test_ini.sh b/tests/test_ini.sh
index 598cd57..106cc95 100755
--- a/tests/test_ini.sh
+++ b/tests/test_ini.sh
@@ -34,6 +34,32 @@
[eee]
multi = foo1
multi = foo2
+
+# inidelete(a)
+[del_separate_options]
+a=b
+b=c
+
+# inidelete(a)
+[del_same_option]
+a=b
+a=c
+
+# inidelete(a)
+[del_missing_option]
+b=c
+
+# inidelete(a)
+[del_missing_option_multi]
+b=c
+b=d
+
+# inidelete(a)
+[del_no_options]
+
+# inidelete(a)
+# no section - del_no_section
+
EOF
# Test with missing arguments
@@ -237,4 +263,33 @@
echo "iniadd with non-exsting failed: $VAL"
fi
+# Test inidelete
+del_cases="
+ del_separate_options
+ del_same_option
+ del_missing_option
+ del_missing_option_multi
+ del_no_options
+ del_no_section"
+
+for x in $del_cases; do
+ inidelete test.ini $x a
+ VAL=$(iniget_multiline test.ini $x a)
+ if [ -z "$VAL" ]; then
+ echo "OK: inidelete $x"
+ else
+ echo "inidelete $x failed: $VAL"
+ fi
+ if [ "$x" = "del_separate_options" -o \
+ "$x" = "del_missing_option" -o \
+ "$x" = "del_missing_option_multi" ]; then
+ VAL=$(iniget_multiline test.ini $x b)
+ if [ "$VAL" = "c" -o "$VAL" = "c d" ]; then
+ echo "OK: inidelete other_options $x"
+ else
+ echo "inidelete other_option $x failed: $VAL"
+ fi
+ fi
+done
+
rm test.ini
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 26aae82..c7f1efa 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -111,8 +111,8 @@
fi
FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
- if [[ ${DISTRO} =~ (f19|f20) && $FORCE_FIREWALLD == "False" ]]; then
- # On Fedora 19 and 20 firewalld interacts badly with libvirt and
+ if [[ ${DISTRO} =~ (f20) && $FORCE_FIREWALLD == "False" ]]; then
+ # On Fedora 20 firewalld interacts badly with libvirt and
# slows things down significantly. However, for those cases
# where that combination is desired, allow this fix to be skipped.
diff --git a/unstack.sh b/unstack.sh
index 3403919..ea45da9 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -66,6 +66,8 @@
done
fi
+load_plugin_settings
+
# Determine what system we are running on. This provides ``os_VENDOR``,
# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
GetOSVersion
@@ -78,11 +80,7 @@
# ==========
# Phase: unstack
-if [[ -d $TOP_DIR/extras.d ]]; then
- for i in $TOP_DIR/extras.d/*.sh; do
- [[ -r $i ]] && source $i unstack
- done
-fi
+run_phase unstack
if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
source $TOP_DIR/openrc