Merge "Replace NvpPluginV2 with NsxPlugin"
diff --git a/HACKING.rst b/HACKING.rst
index 103b579..5c15537 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -275,3 +275,5 @@
 - local variables should be lower case, global variables should be
   upper case
 - function names should_have_underscores, NotCamelCase.
+- functions should be declared as per the regex ^function foo {$
+  with code starting on the next line
diff --git a/clean.sh b/clean.sh
index e16bdb7..3707d84 100755
--- a/clean.sh
+++ b/clean.sh
@@ -97,15 +97,10 @@
 fi
 
 # Do the hypervisor cleanup until this can be moved back into lib/nova
-if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
     cleanup_nova_hypervisor
 fi
 
-#if mount | grep $DATA_DIR/swift/drives; then
-#  sudo umount $DATA_DIR/swift/drives/sdb1
-#fi
-
-
 # Clean out /etc
 sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift
 
@@ -123,9 +118,11 @@
     sudo rm -rf $SCREEN_LOGDIR
 fi
 
-# Clean up networking...
-# should this be in nova?
-# FIXED_IP_ADDR in br100
-
 # Clean up files
-rm -f $TOP_DIR/.stackenv
+
+FILES_TO_CLEAN=".localrc.auto docs-files docs/ shocco/ stack-screenrc test*.conf* test.ini*"
+FILES_TO_CLEAN+=" .stackenv .prereqs"
+
+for file in $FILES_TO_CLEAN; do
+    rm -f $TOP_DIR/$file
+done
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index edcc6d4..d2c636f 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -16,6 +16,7 @@
 # It also assumes default install location (/opt/stack/xxx)
 # to aid in debug, you should also verify that you've added
 # an output directory for screen logs:
+#
 #     SCREEN_LOGDIR=/opt/stack/screen-logs
 
 CERT_DIR=$(cd $(dirname "$0") && pwd)
@@ -24,13 +25,14 @@
 source $TOP_DIR/functions
 source $TOP_DIR/stackrc
 source $TOP_DIR/openrc
+source $TOP_DIR/lib/infra
 source $TOP_DIR/lib/tempest
 source $TOP_DIR/lib/cinder
 
 TEMPFILE=`mktemp`
 RECLONE=True
 
-function log_message() {
+function log_message {
     MESSAGE=$1
     STEP_HEADER=$2
     if [[ "$STEP_HEADER" = "True" ]]; then
@@ -89,9 +91,8 @@
 sleep 5
 
 # run tempest api/volume/test_*
-log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume_*)...", True
-exec 2> >(tee -a $TEMPFILE)
-`./tools/pretty_tox.sh api.volume`
+log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True
+(set -o pipefail; ./tools/pretty_tox.sh api.volume 2>&1 | tee -a $TEMPFILE)
 if [[ $? = 0 ]]; then
     log_message "CONGRATULATIONS!!!  Device driver PASSED!", True
     log_message "Submit output: ($TEMPFILE)"
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index d223301..01d548d 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -57,7 +57,7 @@
 AGGREGATE2_NAME=test_aggregate_$RANDOM
 AGGREGATE_A_ZONE=nova
 
-exit_if_aggregate_present() {
+function exit_if_aggregate_present {
     aggregate_name=$1
 
     if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index ed8ba63..f679669 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -30,14 +30,13 @@
 # Import common functions
 source $TOP_DIR/functions
 
+# Import project functions
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/neutron
+
 # Import configuration
 source $TOP_DIR/openrc
 
-# Import neutron functions if needed
-if is_service_enabled neutron; then
-    source $TOP_DIR/lib/neutron
-fi
-
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index e79774f..b360f1e 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -154,7 +154,7 @@
 # Results
 # =======
 
-function report() {
+function report {
     if [[ -n "$2" ]]; then
         echo "$1: $2"
     fi
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index 6c6fe12..d955e4d 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -165,7 +165,7 @@
 # Results
 # =======
 
-function report() {
+function report {
     if [[ -n "$2" ]]; then
         echo "$1: $2"
     fi
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 51b2644..ad852a4 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -33,11 +33,6 @@
 # Import EC2 configuration
 source $TOP_DIR/eucarc
 
-# Import neutron functions if needed
-if is_service_enabled neutron; then
-    source $TOP_DIR/lib/neutron
-fi
-
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 4ca90a5..8dc44ef 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -30,10 +30,8 @@
 # Import configuration
 source $TOP_DIR/openrc
 
-# Import neutron functions if needed
-if is_service_enabled neutron; then
-    source $TOP_DIR/lib/neutron
-fi
+# Import project functions
+source $TOP_DIR/lib/neutron
 
 # Import exercise configuration
 source $TOP_DIR/exerciserc
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index a9199e6..0a24fe9 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -20,7 +20,7 @@
 set -o errtrace
 
 trap failed ERR
-failed() {
+function failed {
     local r=$?
     set +o errtrace
     set +o xtrace
@@ -395,7 +395,7 @@
 # Usage and main
 # --------------
 
-usage() {
+function usage {
     echo "$0: [-h]"
     echo "  -h, --help              Display help message"
     echo "  -t, --tenant            Create tenants"
@@ -408,7 +408,7 @@
     echo "  -T, --test              Test functions"
 }
 
-main() {
+function main {
 
     echo Description
 
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 21b5d21..83d25c7 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -30,10 +30,9 @@
 # Import configuration
 source $TOP_DIR/openrc
 
-# Import neutron functions if needed
-if is_service_enabled neutron; then
-    source $TOP_DIR/lib/neutron
-fi
+# Import project functions
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/neutron
 
 # Import exercise configuration
 source $TOP_DIR/exerciserc
diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh
new file mode 100644
index 0000000..9e61dc5
--- /dev/null
+++ b/extras.d/50-ironic.sh
@@ -0,0 +1,36 @@
+# ironic.sh - Devstack extras script to install ironic
+
+if is_service_enabled ir-api ir-cond; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/ironic
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing Ironic"
+        install_ironic
+        install_ironicclient
+        cleanup_ironic
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        echo_summary "Configuring Ironic"
+        configure_ironic
+
+        if is_service_enabled key; then
+            create_ironic_accounts
+        fi
+
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        # Initialize ironic
+        init_ironic
+
+        # Start the ironic API and ironic taskmgr components
+        echo_summary "Starting Ironic"
+        start_ironic
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_ironic
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_ironic
+    fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
index 6bbe113..edc1376 100644
--- a/extras.d/70-savanna.sh
+++ b/extras.d/70-savanna.sh
@@ -8,6 +8,7 @@
     elif [[ "$1" == "stack" && "$2" == "install" ]]; then
         echo_summary "Installing Savanna"
         install_savanna
+        cleanup_savanna
         if is_service_enabled horizon; then
             install_savanna_dashboard
         fi
@@ -29,4 +30,8 @@
             cleanup_savanna_dashboard
         fi
     fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_savanna
+    fi
 fi
diff --git a/files/apts/dstat b/files/apts/dstat
new file mode 100644
index 0000000..2b643b8
--- /dev/null
+++ b/files/apts/dstat
@@ -0,0 +1 @@
+dstat
diff --git a/files/apts/glance b/files/apts/glance
index 22787bc..6dc878e 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -1,5 +1,5 @@
 gcc
-libffi-dev          # testonly
+libffi-dev
 libmysqlclient-dev  # testonly
 libpq-dev           # testonly
 libssl-dev          # testonly
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
index b287107..a82304d 100644
--- a/files/apts/n-cpu
+++ b/files/apts/n-cpu
@@ -5,4 +5,4 @@
 genisoimage
 sysfsutils
 sg3-utils
-python-guestfs
+python-guestfs # NOPRIME
diff --git a/files/apts/sysstat b/files/apts/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/apts/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index e64f68f..ff00e38 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -50,12 +50,12 @@
 catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1
 catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1
 catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.cloudformation.name = Heat CloudFormation Service
+catalog.RegionOne.cloudformation.name = CloudFormation service
 
 catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
 catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
 catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
-catalog.RegionOne.orchestration.name = Heat Service
+catalog.RegionOne.orchestration.name = Orchestration Service
 
 catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1
 catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
new file mode 100644
index 0000000..2b643b8
--- /dev/null
+++ b/files/rpms-suse/dstat
@@ -0,0 +1 @@
+dstat
diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/rpms-suse/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 623c13e..199ae10 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -4,4 +4,4 @@
 python-devel
 postgresql-devel
 iscsi-initiator-utils
-python-lxml         #dist:f18,f19,f20
+python-lxml         #dist:f18,f19,f20,rhel7
diff --git a/files/rpms/dstat b/files/rpms/dstat
new file mode 100644
index 0000000..2b643b8
--- /dev/null
+++ b/files/rpms/dstat
@@ -0,0 +1 @@
+dstat
diff --git a/files/rpms/glance b/files/rpms/glance
index fffd9c8..25c5d39 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,5 +1,5 @@
 gcc
-libffi-devel        # testonly
+libffi-devel
 libxml2-devel       # testonly
 libxslt-devel       # testonly
 mysql-devel         # testonly
@@ -9,8 +9,8 @@
 python-devel
 python-eventlet
 python-greenlet
-python-lxml         #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
+python-lxml         #dist:f18,f19,f20,rhel7
+python-paste-deploy #dist:f18,f19,f20,rhel7
 python-routes
 python-sqlalchemy
 python-wsgiref
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index e4fdaf4..32b1546 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -4,4 +4,4 @@
 genisoimage
 sysfsutils
 sg3_utils
-python-libguestfs
+python-libguestfs # NOPRIME
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 67bf523..42d7f68 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,8 +11,8 @@
 python-iso8601
 python-kombu
 #rhel6 gets via pip
-python-paste        # dist:f18,f19,f20
-python-paste-deploy # dist:f18,f19,f20
+python-paste        # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f18,f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
diff --git a/files/rpms/nova b/files/rpms/nova
index ac70ac5..a607d92 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -28,11 +28,11 @@
 python-lockfile
 python-migrate
 python-mox
-python-paramiko # dist:f18,f19,f20
-# ^ on RHEL, brings in python-crypto which conflicts with version from
+python-paramiko # dist:f18,f19,f20,rhel7
+# ^ on RHEL6, brings in python-crypto which conflicts with version from
 # pip we need
-python-paste        # dist:f18,f19,f20
-python-paste-deploy # dist:f18,f19,f20
+python-paste        # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f18,f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
diff --git a/files/rpms/swift b/files/rpms/swift
index 32432bc..72253f7 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -9,7 +9,7 @@
 python-greenlet
 python-netifaces
 python-nose
-python-paste-deploy # dist:f18,f19,f20
+python-paste-deploy # dist:f18,f19,f20,rhel7
 python-simplejson
 python-webob
 pyxattr
diff --git a/files/rpms/sysstat b/files/rpms/sysstat
deleted file mode 100644
index ea0c342..0000000
--- a/files/rpms/sysstat
+++ /dev/null
@@ -1 +0,0 @@
-sysstat
diff --git a/functions b/functions
index 281b676..a844b1c 100644
--- a/functions
+++ b/functions
@@ -1,563 +1,22 @@
-# functions - Common functions used by DevStack components
+# functions - DevStack-specific functions
 #
 # The following variables are assumed to be defined by certain functions:
 #
+# - ``DATABASE_BACKENDS``
 # - ``ENABLED_SERVICES``
-# - ``ERROR_ON_CLONE``
 # - ``FILES``
 # - ``GLANCE_HOSTPORT``
-# - ``OFFLINE``
-# - ``PIP_DOWNLOAD_CACHE``
-# - ``PIP_USE_MIRRORS``
-# - ``RECLONE``
-# - ``TRACK_DEPENDS``
-# - ``http_proxy``, ``https_proxy``, ``no_proxy``
+#
 
+# Include the common functions
+FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
+source ${FUNC_DIR}/functions-common
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
-# Convert CIDR notation to a IPv4 netmask
-# cidr2netmask cidr-bits
-function cidr2netmask() {
-    local maskpat="255 255 255 255"
-    local maskdgt="254 252 248 240 224 192 128"
-    set -- ${maskpat:0:$(( ($1 / 8) * 4 ))}${maskdgt:$(( (7 - ($1 % 8)) * 4 )):3}
-    echo ${1-0}.${2-0}.${3-0}.${4-0}
-}
-
-
-# Return the network portion of the given IP address using netmask
-# netmask is in the traditional dotted-quad format
-# maskip ip-address netmask
-function maskip() {
-    local ip=$1
-    local mask=$2
-    local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
-    local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
-    echo $subnet
-}
-
-
-# Exit 0 if address is in network or 1 if address is not in network
-# ip-range is in CIDR notation: 1.2.3.4/20
-# address_in_net ip-address ip-range
-function address_in_net() {
-    local ip=$1
-    local range=$2
-    local masklen=${range#*/}
-    local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
-    local subnet=$(maskip $ip $(cidr2netmask $masklen))
-    [[ $network == $subnet ]]
-}
-
-
-# Wrapper for ``apt-get`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``
-# apt_get operation package [package ...]
-function apt_get() {
-    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo DEBIAN_FRONTEND=noninteractive \
-        http_proxy=$http_proxy https_proxy=$https_proxy \
-        no_proxy=$no_proxy \
-        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
-}
-
-
-# Gracefully cp only if source file/dir exists
-# cp_it source destination
-function cp_it {
-    if [ -e $1 ] || [ -d $1 ]; then
-        cp -pRL $1 $2
-    fi
-}
-
-
-# Prints backtrace info
-# filename:lineno:function
-function backtrace {
-    local level=$1
-    local deep=$((${#BASH_SOURCE[@]} - 1))
-    echo "[Call Trace]"
-    while [ $level -le $deep ]; do
-        echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
-        deep=$((deep - 1))
-    done
-}
-
-
-# Prints line number and "message" then exits
-# die $LINENO "message"
-function die() {
-    local exitcode=$?
-    set +o xtrace
-    local line=$1; shift
-    if [ $exitcode == 0 ]; then
-        exitcode=1
-    fi
-    backtrace 2
-    err $line "$*"
-    exit $exitcode
-}
-
-
-# Checks an environment variable is not set or has length 0 OR if the
-# exit code is non-zero and prints "message" and exits
-# NOTE: env-var is the variable name without a '$'
-# die_if_not_set $LINENO env-var "message"
-function die_if_not_set() {
-    local exitcode=$?
-    FXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local line=$1; shift
-    local evar=$1; shift
-    if ! is_set $evar || [ $exitcode != 0 ]; then
-        die $line "$*"
-    fi
-    $FXTRACE
-}
-
-
-# Prints line number and "message" in error format
-# err $LINENO "message"
-function err() {
-    local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
-    fi
-    $errXTRACE
-    return $exitcode
-}
-
-
-# Checks an environment variable is not set or has length 0 OR if the
-# exit code is non-zero and prints "message"
-# NOTE: env-var is the variable name without a '$'
-# err_if_not_set $LINENO env-var "message"
-function err_if_not_set() {
-    local exitcode=$?
-    errinsXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local line=$1; shift
-    local evar=$1; shift
-    if ! is_set $evar || [ $exitcode != 0 ]; then
-        err $line "$*"
-    fi
-    $errinsXTRACE
-    return $exitcode
-}
-
-
-# Prints line number and "message" in warning format
-# warn $LINENO "message"
-function warn() {
-    local exitcode=$?
-    errXTRACE=$(set +o | grep xtrace)
-    set +o xtrace
-    local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
-    fi
-    $errXTRACE
-    return $exitcode
-}
-
-
-# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
-# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
-# ``localrc`` or on the command line if necessary::
-#
-# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
-#
-#     http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
-
-function export_proxy_variables() {
-    if [[ -n "$http_proxy" ]]; then
-        export http_proxy=$http_proxy
-    fi
-    if [[ -n "$https_proxy" ]]; then
-        export https_proxy=$https_proxy
-    fi
-    if [[ -n "$no_proxy" ]]; then
-        export no_proxy=$no_proxy
-    fi
-}
-
-
-# Grab a numbered field from python prettytable output
-# Fields are numbered starting with 1
-# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
-# get_field field-number
-function get_field() {
-    while read data; do
-        if [ "$1" -lt 0 ]; then
-            field="(\$(NF$1))"
-        else
-            field="\$$(($1 + 1))"
-        fi
-        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
-    done
-}
-
-
-# Get the default value for HOST_IP
-# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
-function get_default_host_ip() {
-    local fixed_range=$1
-    local floating_range=$2
-    local host_ip_iface=$3
-    local host_ip=$4
-
-    # Find the interface used for the default route
-    host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
-    # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
-    if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
-        host_ip=""
-        host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
-        for IP in $host_ips; do
-            # Attempt to filter out IP addresses that are part of the fixed and
-            # floating range. Note that this method only works if the ``netaddr``
-            # python library is installed. If it is not installed, an error
-            # will be printed and the first IP from the interface will be used.
-            # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
-            # address.
-            if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
-                host_ip=$IP
-                break;
-            fi
-        done
-    fi
-    echo $host_ip
-}
-
-
-function _get_package_dir() {
-    local pkg_dir
-    if is_ubuntu; then
-        pkg_dir=$FILES/apts
-    elif is_fedora; then
-        pkg_dir=$FILES/rpms
-    elif is_suse; then
-        pkg_dir=$FILES/rpms-suse
-    else
-        exit_distro_not_supported "list of packages"
-    fi
-    echo "$pkg_dir"
-}
-
-
-# get_packages() collects a list of package names of any type from the
-# prerequisite files in ``files/{apts|rpms}``.  The list is intended
-# to be passed to a package installer such as apt or yum.
-#
-# Only packages required for the services in 1st argument will be
-# included.  Two bits of metadata are recognized in the prerequisite files:
-#
-# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
-# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
-#   of the package to the distros listed.  The distro names are case insensitive.
-function get_packages() {
-    local services=$@
-    local package_dir=$(_get_package_dir)
-    local file_to_parse
-    local service
-
-    if [[ -z "$package_dir" ]]; then
-        echo "No package directory supplied"
-        return 1
-    fi
-    if [[ -z "$DISTRO" ]]; then
-        GetDistro
-    fi
-    for service in ${services//,/ }; do
-        # Allow individual services to specify dependencies
-        if [[ -e ${package_dir}/${service} ]]; then
-            file_to_parse="${file_to_parse} $service"
-        fi
-        # NOTE(sdague) n-api needs glance for now because that's where
-        # glance client is
-        if [[ $service == n-api ]]; then
-            if [[ ! $file_to_parse =~ nova ]]; then
-                file_to_parse="${file_to_parse} nova"
-            fi
-            if [[ ! $file_to_parse =~ glance ]]; then
-                file_to_parse="${file_to_parse} glance"
-            fi
-        elif [[ $service == c-* ]]; then
-            if [[ ! $file_to_parse =~ cinder ]]; then
-                file_to_parse="${file_to_parse} cinder"
-            fi
-        elif [[ $service == ceilometer-* ]]; then
-            if [[ ! $file_to_parse =~ ceilometer ]]; then
-                file_to_parse="${file_to_parse} ceilometer"
-            fi
-        elif [[ $service == s-* ]]; then
-            if [[ ! $file_to_parse =~ swift ]]; then
-                file_to_parse="${file_to_parse} swift"
-            fi
-        elif [[ $service == n-* ]]; then
-            if [[ ! $file_to_parse =~ nova ]]; then
-                file_to_parse="${file_to_parse} nova"
-            fi
-        elif [[ $service == g-* ]]; then
-            if [[ ! $file_to_parse =~ glance ]]; then
-                file_to_parse="${file_to_parse} glance"
-            fi
-        elif [[ $service == key* ]]; then
-            if [[ ! $file_to_parse =~ keystone ]]; then
-                file_to_parse="${file_to_parse} keystone"
-            fi
-        elif [[ $service == q-* ]]; then
-            if [[ ! $file_to_parse =~ neutron ]]; then
-                file_to_parse="${file_to_parse} neutron"
-            fi
-        fi
-    done
-
-    for file in ${file_to_parse}; do
-        local fname=${package_dir}/${file}
-        local OIFS line package distros distro
-        [[ -e $fname ]] || continue
-
-        OIFS=$IFS
-        IFS=$'\n'
-        for line in $(<${fname}); do
-            if [[ $line =~ "NOPRIME" ]]; then
-                continue
-            fi
-
-            # Assume we want this package
-            package=${line%#*}
-            inst_pkg=1
-
-            # Look for # dist:xxx in comment
-            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
-                # We are using BASH regexp matching feature.
-                package=${BASH_REMATCH[1]}
-                distros=${BASH_REMATCH[2]}
-                # In bash ${VAR,,} will lowecase VAR
-                # Look for a match in the distro list
-                if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
-                    # If no match then skip this package
-                    inst_pkg=0
-                fi
-            fi
-
-            # Look for # testonly in comment
-            if [[ $line =~ (.*)#.*testonly.* ]]; then
-                package=${BASH_REMATCH[1]}
-                # Are we installing test packages? (test for the default value)
-                if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then
-                    # If not installing test packages the skip this package
-                    inst_pkg=0
-                fi
-            fi
-
-            if [[ $inst_pkg = 1 ]]; then
-                echo $package
-            fi
-        done
-        IFS=$OIFS
-    done
-}
-
-
-# Determine OS Vendor, Release and Update
-# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
-# Returns results in global variables:
-# os_VENDOR - vendor name
-# os_RELEASE - release
-# os_UPDATE - update
-# os_PACKAGE - package type
-# os_CODENAME - vendor's codename for release
-# GetOSVersion
-GetOSVersion() {
-    # Figure out which vendor we are
-    if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
-        # OS/X
-        os_VENDOR=`sw_vers -productName`
-        os_RELEASE=`sw_vers -productVersion`
-        os_UPDATE=${os_RELEASE##*.}
-        os_RELEASE=${os_RELEASE%.*}
-        os_PACKAGE=""
-        if [[ "$os_RELEASE" =~ "10.7" ]]; then
-            os_CODENAME="lion"
-        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
-            os_CODENAME="snow leopard"
-        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
-            os_CODENAME="leopard"
-        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
-            os_CODENAME="tiger"
-        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
-            os_CODENAME="panther"
-        else
-            os_CODENAME=""
-        fi
-    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
-        os_VENDOR=$(lsb_release -i -s)
-        os_RELEASE=$(lsb_release -r -s)
-        os_UPDATE=""
-        os_PACKAGE="rpm"
-        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
-            os_PACKAGE="deb"
-        elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
-            lsb_release -d -s | grep -q openSUSE
-            if [[ $? -eq 0 ]]; then
-                os_VENDOR="openSUSE"
-            fi
-        elif [[ $os_VENDOR == "openSUSE project" ]]; then
-            os_VENDOR="openSUSE"
-        elif [[ $os_VENDOR =~ Red.*Hat ]]; then
-            os_VENDOR="Red Hat"
-        fi
-        os_CODENAME=$(lsb_release -c -s)
-    elif [[ -r /etc/redhat-release ]]; then
-        # Red Hat Enterprise Linux Server release 5.5 (Tikanga)
-        # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
-        # CentOS release 5.5 (Final)
-        # CentOS Linux release 6.0 (Final)
-        # Fedora release 16 (Verne)
-        # XenServer release 6.2.0-70446c (xenenterprise)
-        os_CODENAME=""
-        for r in "Red Hat" CentOS Fedora XenServer; do
-            os_VENDOR=$r
-            if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
-                ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
-                os_CODENAME=${ver#*|}
-                os_RELEASE=${ver%|*}
-                os_UPDATE=${os_RELEASE##*.}
-                os_RELEASE=${os_RELEASE%.*}
-                break
-            fi
-            os_VENDOR=""
-        done
-        os_PACKAGE="rpm"
-    elif [[ -r /etc/SuSE-release ]]; then
-        for r in openSUSE "SUSE Linux"; do
-            if [[ "$r" = "SUSE Linux" ]]; then
-                os_VENDOR="SUSE LINUX"
-            else
-                os_VENDOR=$r
-            fi
-
-            if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
-                os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
-                os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
-                os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
-                break
-            fi
-            os_VENDOR=""
-        done
-        os_PACKAGE="rpm"
-    # If lsb_release is not installed, we should be able to detect Debian OS
-    elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
-        os_VENDOR="Debian"
-        os_PACKAGE="deb"
-        os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
-        os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
-    fi
-    export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
-}
-
-
-# Translate the OS version values into common nomenclature
-# Sets ``DISTRO`` from the ``os_*`` values
-function GetDistro() {
-    GetOSVersion
-    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
-        # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
-        DISTRO=$os_CODENAME
-    elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
-        # For Fedora, just use 'f' and the release
-        DISTRO="f$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
-        DISTRO="opensuse-$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
-        # For SLE, also use the service pack
-        if [[ -z "$os_UPDATE" ]]; then
-            DISTRO="sle${os_RELEASE}"
-        else
-            DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
-        fi
-    elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
-        # Drop the . release as we assume it's compatible
-        DISTRO="rhel${os_RELEASE::1}"
-    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
-        DISTRO="xs$os_RELEASE"
-    else
-        # Catch-all for now is Vendor + Release + Update
-        DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
-    fi
-    export DISTRO
-}
-
-
-# Determine if current distribution is a Fedora-based distribution
-# (Fedora, RHEL, CentOS, etc).
-# is_fedora
-function is_fedora {
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ]
-}
-
-
-# Determine if current distribution is a SUSE-based distribution
-# (openSUSE, SLE).
-# is_suse
-function is_suse {
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
-}
-
-
-# Determine if current distribution is an Ubuntu-based distribution
-# It will also detect non-Ubuntu but Debian-based distros
-# is_ubuntu
-function is_ubuntu {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    [ "$os_PACKAGE" = "deb" ]
-}
-
-
-# Exit after outputting a message about the distribution not being supported.
-# exit_distro_not_supported [optional-string-telling-what-is-missing]
-function exit_distro_not_supported {
-    if [[ -z "$DISTRO" ]]; then
-        GetDistro
-    fi
-
-    if [ $# -gt 0 ]; then
-        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
-    else
-        die $LINENO "Support for $DISTRO is incomplete."
-    fi
-}
-
-# Utility function for checking machine architecture
-# is_arch arch-type
-function is_arch {
-    ARCH_TYPE=$1
-
-    [[ "$(uname -m)" == "$ARCH_TYPE" ]]
-}
-
 # Checks if installed Apache is <= given version
 # $1 = x.y.z (version string of Apache)
 function check_apache_version {
@@ -570,478 +29,6 @@
     expr "$version" '>=' $1 > /dev/null
 }
 
-# git clone only if directory doesn't exist already.  Since ``DEST`` might not
-# be owned by the installation user, we create the directory and change the
-# ownership to the proper user.
-# Set global RECLONE=yes to simulate a clone when dest-dir exists
-# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
-# does not exist (default is False, meaning the repo will be cloned).
-# Uses global ``OFFLINE``
-# git_clone remote dest-dir branch
-function git_clone {
-    GIT_REMOTE=$1
-    GIT_DEST=$2
-    GIT_REF=$3
-    RECLONE=$(trueorfalse False $RECLONE)
-
-    if [[ "$OFFLINE" = "True" ]]; then
-        echo "Running in offline mode, clones already exist"
-        # print out the results so we know what change was used in the logs
-        cd $GIT_DEST
-        git show --oneline | head -1
-        return
-    fi
-
-    if echo $GIT_REF | egrep -q "^refs"; then
-        # If our branch name is a gerrit style refs/changes/...
-        if [[ ! -d $GIT_DEST ]]; then
-            [[ "$ERROR_ON_CLONE" = "True" ]] && \
-                die $LINENO "Cloning not allowed in this configuration"
-            git clone $GIT_REMOTE $GIT_DEST
-        fi
-        cd $GIT_DEST
-        git fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
-    else
-        # do a full clone only if the directory doesn't exist
-        if [[ ! -d $GIT_DEST ]]; then
-            [[ "$ERROR_ON_CLONE" = "True" ]] && \
-                die $LINENO "Cloning not allowed in this configuration"
-            git clone $GIT_REMOTE $GIT_DEST
-            cd $GIT_DEST
-            # This checkout syntax works for both branches and tags
-            git checkout $GIT_REF
-        elif [[ "$RECLONE" = "True" ]]; then
-            # if it does exist then simulate what clone does if asked to RECLONE
-            cd $GIT_DEST
-            # set the url to pull from and fetch
-            git remote set-url origin $GIT_REMOTE
-            git fetch origin
-            # remove the existing ignored files (like pyc) as they cause breakage
-            # (due to the py files having older timestamps than our pyc, so python
-            # thinks the pyc files are correct using them)
-            find $GIT_DEST -name '*.pyc' -delete
-
-            # handle GIT_REF accordingly to type (tag, branch)
-            if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
-                git_update_tag $GIT_REF
-            elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
-                git_update_branch $GIT_REF
-            elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
-                git_update_remote_branch $GIT_REF
-            else
-                die $LINENO "$GIT_REF is neither branch nor tag"
-            fi
-
-        fi
-    fi
-
-    # print out the results so we know what change was used in the logs
-    cd $GIT_DEST
-    git show --oneline | head -1
-}
-
-
-# git update using reference as a branch.
-# git_update_branch ref
-function git_update_branch() {
-
-    GIT_BRANCH=$1
-
-    git checkout -f origin/$GIT_BRANCH
-    # a local branch might not exist
-    git branch -D $GIT_BRANCH || true
-    git checkout -b $GIT_BRANCH
-}
-
-
-# git update using reference as a branch.
-# git_update_remote_branch ref
-function git_update_remote_branch() {
-
-    GIT_BRANCH=$1
-
-    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
-}
-
-
-# git update using reference as a tag. Be careful editing source at that repo
-# as working copy will be in a detached mode
-# git_update_tag ref
-function git_update_tag() {
-
-    GIT_TAG=$1
-
-    git tag -d $GIT_TAG
-    # fetching given tag only
-    git fetch origin tag $GIT_TAG
-    git checkout -f $GIT_TAG
-}
-
-
-# Comment an option in an INI file
-# inicomment config-file section option
-function inicomment() {
-    local file=$1
-    local section=$2
-    local option=$3
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
-}
-
-
-# Uncomment an option in an INI file
-# iniuncomment config-file section option
-function iniuncomment() {
-    local file=$1
-    local section=$2
-    local option=$3
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
-}
-
-
-# Get an option from an INI file
-# iniget config-file section option
-function iniget() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    echo ${line#*=}
-}
-
-
-# Determinate is the given option present in the INI file
-# ini_has_option config-file section option
-function ini_has_option() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    [ -n "$line" ]
-}
-
-
-# Set an option in an INI file
-# iniset config-file section option value
-function iniset() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local value=$4
-
-    [[ -z $section || -z $option ]] && return
-
-    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    fi
-    if ! ini_has_option "$file" "$section" "$option"; then
-        # Add it
-        sed -i -e "/^\[$section\]/ a\\
-$option = $value
-" "$file"
-    else
-        local sep=$(echo -ne "\x01")
-        # Replace it
-        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
-    fi
-}
-
-
-# Get a multiple line option from an INI file
-# iniget_multiline config-file section option
-function iniget_multiline() {
-    local file=$1
-    local section=$2
-    local option=$3
-    local values
-    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
-    echo ${values}
-}
-
-
-# Set a multiple line option in an INI file
-# iniset_multiline config-file section option value1 value2 valu3 ...
-function iniset_multiline() {
-    local file=$1
-    local section=$2
-    local option=$3
-    shift 3
-    local values
-    for v in $@; do
-        # The later sed command inserts each new value in the line next to
-        # the section identifier, which causes the values to be inserted in
-        # the reverse order. Do a reverse here to keep the original order.
-        values="$v ${values}"
-    done
-    if ! grep -q "^\[$section\]" "$file"; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    else
-        # Remove old values
-        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
-    fi
-    # Add new ones
-    for v in $values; do
-        sed -i -e "/^\[$section\]/ a\\
-$option = $v
-" "$file"
-    done
-}
-
-
-# Append a new option in an ini file without replacing the old value
-# iniadd config-file section option value1 value2 value3 ...
-function iniadd() {
-    local file=$1
-    local section=$2
-    local option=$3
-    shift 3
-    local values="$(iniget_multiline $file $section $option) $@"
-    iniset_multiline $file $section $option $values
-}
-
-# Find out if a process exists by partial name.
-# is_running name
-function is_running() {
-    local name=$1
-    ps auxw | grep -v grep | grep ${name} > /dev/null
-    RC=$?
-    # some times I really hate bash reverse binary logic
-    return $RC
-}
-
-
-# is_service_enabled() checks if the service(s) specified as arguments are
-# enabled by the user in ``ENABLED_SERVICES``.
-#
-# Multiple services specified as arguments are ``OR``'ed together; the test
-# is a short-circuit boolean, i.e it returns on the first match.
-#
-# There are special cases for some 'catch-all' services::
-#   **nova** returns true if any service enabled start with **n-**
-#   **cinder** returns true if any service enabled start with **c-**
-#   **ceilometer** returns true if any service enabled start with **ceilometer**
-#   **glance** returns true if any service enabled start with **g-**
-#   **neutron** returns true if any service enabled start with **q-**
-#   **swift** returns true if any service enabled start with **s-**
-#   **trove** returns true if any service enabled start with **tr-**
-#   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
-#   **s-** services will be enabled. This will be deprecated in the future.
-#
-# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
-# We also need to make sure to treat **n-cell-region** and **n-cell-child**
-# as enabled in this case.
-#
-# Uses global ``ENABLED_SERVICES``
-# is_service_enabled service [service ...]
-function is_service_enabled() {
-    services=$@
-    for service in ${services}; do
-        [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
-        [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
-        [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
-        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
-        [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
-        [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
-        [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0
-        [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
-        [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
-        [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
-        [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
-    done
-    return 1
-}
-
-
-# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
-# _cleanup_service_list service-list
-function _cleanup_service_list () {
-    echo "$1" | sed -e '
-        s/,,/,/g;
-        s/^,//;
-        s/,$//
-    '
-}
-
-
-# enable_service() adds the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are not already present.
-#
-# For example:
-#   enable_service qpid
-#
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
-# enable_service service [service ...]
-function enable_service() {
-    local tmpsvcs="${ENABLED_SERVICES}"
-    for service in $@; do
-        if ! is_service_enabled $service; then
-            tmpsvcs+=",$service"
-        fi
-    done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
-    disable_negated_services
-}
-
-
-# disable_service() removes the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are present.
-#
-# For example:
-#   disable_service rabbit
-#
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
-# disable_service service [service ...]
-function disable_service() {
-    local tmpsvcs=",${ENABLED_SERVICES},"
-    local service
-    for service in $@; do
-        if is_service_enabled $service; then
-            tmpsvcs=${tmpsvcs//,$service,/,}
-        fi
-    done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
-}
-
-
-# disable_all_services() removes all current services
-# from ``ENABLED_SERVICES`` to reset the configuration
-# before a minimal installation
-# Uses global ``ENABLED_SERVICES``
-# disable_all_services
-function disable_all_services() {
-    ENABLED_SERVICES=""
-}
-
-
-# Remove all services starting with '-'.  For example, to install all default
-# services except rabbit (rabbit) set in ``localrc``:
-# ENABLED_SERVICES+=",-rabbit"
-# Uses global ``ENABLED_SERVICES``
-# disable_negated_services
-function disable_negated_services() {
-    local tmpsvcs="${ENABLED_SERVICES}"
-    local service
-    for service in ${tmpsvcs//,/ }; do
-        if [[ ${service} == -* ]]; then
-            tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
-        fi
-    done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
-}
-
-
-# Distro-agnostic package installer
-# install_package package [package ...]
-function install_package() {
-    if is_ubuntu; then
-        [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update
-        NO_UPDATE_REPOS=True
-
-        apt_get install "$@"
-    elif is_fedora; then
-        yum_install "$@"
-    elif is_suse; then
-        zypper_install "$@"
-    else
-        exit_distro_not_supported "installing packages"
-    fi
-}
-
-
-# Distro-agnostic package uninstaller
-# uninstall_package package [package ...]
-function uninstall_package() {
-    if is_ubuntu; then
-        apt_get purge "$@"
-    elif is_fedora; then
-        sudo yum remove -y "$@"
-    elif is_suse; then
-        sudo zypper rm "$@"
-    else
-        exit_distro_not_supported "uninstalling packages"
-    fi
-}
-
-
-# Distro-agnostic function to tell if a package is installed
-# is_package_installed package [package ...]
-function is_package_installed() {
-    if [[ -z "$@" ]]; then
-        return 1
-    fi
-
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        dpkg -s "$@" > /dev/null 2> /dev/null
-    elif [[ "$os_PACKAGE" = "rpm" ]]; then
-        rpm --quiet -q "$@"
-    else
-        exit_distro_not_supported "finding if a package is installed"
-    fi
-}
-
-
-# Test if the named environment variable is set and not zero length
-# is_set env-var
-function is_set() {
-    local var=\$"$1"
-    eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this
-}
-
-
-# Wrapper for ``pip install`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
-# ``TRACK_DEPENDS``, ``*_proxy``
-# pip_install package [package ...]
-function pip_install {
-    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    if [[ $TRACK_DEPENDS = True ]]; then
-        source $DEST/.venv/bin/activate
-        CMD_PIP=$DEST/.venv/bin/pip
-        SUDO_PIP="env"
-    else
-        SUDO_PIP="sudo"
-        CMD_PIP=$(get_pip_command)
-    fi
-
-    # Mirror option not needed anymore because pypi has CDN available,
-    # but it's useful in certain circumstances
-    PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
-    if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
-        PIP_MIRROR_OPT="--use-mirrors"
-    fi
-
-    # pip < 1.4 has a bug where it will use an already existing build
-    # directory unconditionally.  Say an earlier component installs
-    # foo v1.1; pip will have built foo's source in
-    # /tmp/$USER-pip-build.  Even if a later component specifies foo <
-    # 1.1, the existing extracted build will be used and cause
-    # confusing errors.  By creating unique build directories we avoid
-    # this problem. See https://github.com/pypa/pip/issues/709
-    local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
-
-    $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
-        HTTP_PROXY=$http_proxy \
-        HTTPS_PROXY=$https_proxy \
-        NO_PROXY=$no_proxy \
-        $CMD_PIP install --build=${pip_build_tmp} \
-        $PIP_MIRROR_OPT $@ \
-        && $SUDO_PIP rm -rf ${pip_build_tmp}
-}
-
 
 # Cleanup anything from /tmp on unstack
 # clean_tmp
@@ -1052,333 +39,6 @@
     sudo rm -rf ${tmp_dir}/pip-build.*
 }
 
-# Service wrapper to restart services
-# restart_service service-name
-function restart_service() {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 restart
-    else
-        sudo /sbin/service $1 restart
-    fi
-}
-
-
-# _run_process() is designed to be backgrounded by run_process() to simulate a
-# fork.  It includes the dirty work of closing extra filehandles and preparing log
-# files to produce the same logs as screen_it().  The log filename is derived
-# from the service name and global-and-now-misnamed SCREEN_LOGDIR
-# _run_process service "command-line"
-function _run_process() {
-    local service=$1
-    local command="$2"
-
-    # Undo logging redirections and close the extra descriptors
-    exec 1>&3
-    exec 2>&3
-    exec 3>&-
-    exec 6>&-
-
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
-        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-
-        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
-        export PYTHONUNBUFFERED=1
-    fi
-
-    exec /bin/bash -c "$command"
-    die "$service exec failure: $command"
-}
-
-
-# run_process() launches a child process that closes all file descriptors and
-# then exec's the passed in command.  This is meant to duplicate the semantics
-# of screen_it() without screen.  PIDs are written to
-# $SERVICE_DIR/$SCREEN_NAME/$service.pid
-# run_process service "command-line"
-function run_process() {
-    local service=$1
-    local command="$2"
-
-    # Spawn the child process
-    _run_process "$service" "$command" &
-    echo $!
-}
-
-
-# Helper to launch a service in a named screen
-# screen_it service "command-line"
-function screen_it {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-
-    if is_service_enabled $1; then
-        # Append the service to the screen rc file
-        screen_rc "$1" "$2"
-
-        if [[ "$USE_SCREEN" = "True" ]]; then
-            screen -S $SCREEN_NAME -X screen -t $1
-
-            if [[ -n ${SCREEN_LOGDIR} ]]; then
-                screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
-                screen -S $SCREEN_NAME -p $1 -X log on
-                ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-            fi
-
-            # sleep to allow bash to be ready to be send the command - we are
-            # creating a new window in screen and then sends characters, so if
-            # bash isn't running by the time we send the command, nothing happens
-            sleep 1.5
-
-            NL=`echo -ne '\015'`
-            # This fun command does the following:
-            # - the passed server command is backgrounded
-            # - the pid of the background process is saved in the usual place
-            # - the server process is brought back to the foreground
-            # - if the server process exits prematurely the fg command errors
-            #   and a message is written to stdout and the service failure file
-            # The pid saved can be used in screen_stop() as a process group
-            # id to kill off all child processes
-            screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
-        else
-            # Spawn directly without screen
-            run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
-        fi
-    fi
-}
-
-
-# Stop a service in screen
-# If a PID is available use it, kill the whole process group via TERM
-# If screen is being used kill the screen window; this will catch processes
-# that did not leave a PID behind
-# screen_stop service
-function screen_stop() {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True $USE_SCREEN)
-
-    if is_service_enabled $1; then
-        # Kill via pid if we have one available
-        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
-            pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
-            rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
-        fi
-        if [[ "$USE_SCREEN" = "True" ]]; then
-            # Clean up the screen window
-            screen -S $SCREEN_NAME -p $1 -X kill
-        fi
-    fi
-}
-
-
-# Screen rc file builder
-# screen_rc service "command-line"
-function screen_rc {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
-    if [[ ! -e $SCREENRC ]]; then
-        # Name the screen session
-        echo "sessionname $SCREEN_NAME" > $SCREENRC
-        # Set a reasonable statusbar
-        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
-        # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
-        echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
-        echo "screen -t shell bash" >> $SCREENRC
-    fi
-    # If this service doesn't already exist in the screenrc file
-    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
-        NL=`echo -ne '\015'`
-        echo "screen -t $1 bash" >> $SCREENRC
-        echo "stuff \"$2$NL\"" >> $SCREENRC
-
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC
-            echo "log on" >>$SCREENRC
-        fi
-    fi
-}
-
-
-# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
-# This is used for ``service_check`` when all the ``screen_it`` are called finished
-# init_service_check
-function init_service_check() {
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
-        mkdir -p "$SERVICE_DIR/$SCREEN_NAME"
-    fi
-
-    rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
-}
-
-
-# Helper to get the status of each running service
-# service_check
-function service_check() {
-    local service
-    local failures
-    SCREEN_NAME=${SCREEN_NAME:-stack}
-    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-
-
-    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
-        echo "No service status directory found"
-        return
-    fi
-
-    # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME
-    failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null`
-
-    for service in $failures; do
-        service=`basename $service`
-        service=${service%.failure}
-        echo "Error: Service $service is not running"
-    done
-
-    if [ -n "$failures" ]; then
-        echo "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
-    fi
-}
-
-# Returns true if the directory is on a filesystem mounted via NFS.
-function is_nfs_directory() {
-    local mount_type=`stat -f -L -c %T $1`
-    test "$mount_type" == "nfs"
-}
-
-# Only run the command if the target file (the last arg) is not on an
-# NFS filesystem.
-function _safe_permission_operation() {
-    local args=( $@ )
-    local last
-    local sudo_cmd
-    local dir_to_check
-
-    let last="${#args[*]} - 1"
-
-    dir_to_check=${args[$last]}
-    if [ ! -d "$dir_to_check" ]; then
-        dir_to_check=`dirname "$dir_to_check"`
-    fi
-
-    if is_nfs_directory "$dir_to_check" ; then
-        return 0
-    fi
-
-    if [[ $TRACK_DEPENDS = True ]]; then
-        sudo_cmd="env"
-    else
-        sudo_cmd="sudo"
-    fi
-
-    $sudo_cmd $@
-}
-
-# Only change ownership of a file or directory if it is not on an NFS
-# filesystem.
-function safe_chown() {
-    _safe_permission_operation chown $@
-}
-
-# Only change permissions of a file or directory if it is not on an
-# NFS filesystem.
-function safe_chmod() {
-    _safe_permission_operation chmod $@
-}
-
-# ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
-#
-# Updates the dependencies in project_dir from the
-# openstack/requirements global list before installing anything.
-#
-# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``
-# setup_develop directory
-function setup_develop() {
-    local project_dir=$1
-
-    echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir"
-
-    # Don't update repo if local changes exist
-    # Don't use buggy "git diff --quiet"
-    (cd $project_dir && git diff --exit-code >/dev/null)
-    local update_requirements=$?
-
-    if [ $update_requirements -eq 0 ]; then
-        (cd $REQUIREMENTS_DIR; \
-            $SUDO_CMD python update.py $project_dir)
-    fi
-
-    setup_develop_no_requirements_update $project_dir
-
-    # We've just gone and possibly modified the user's source tree in an
-    # automated way, which is considered bad form if it's a development
-    # tree because we've screwed up their next git checkin. So undo it.
-    #
-    # However... there are some circumstances, like running in the gate
-    # where we really really want the overridden version to stick. So provide
-    # a variable that tells us whether or not we should UNDO the requirements
-    # changes (this will be set to False in the OpenStack ci gate)
-    if [ $UNDO_REQUIREMENTS = "True" ]; then
-        if [ $update_requirements -eq 0 ]; then
-            (cd $project_dir && git reset --hard)
-        fi
-    fi
-}
-
-# ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
-# Uses globals ``STACK_USER``
-# setup_develop_no_requirements_update directory
-function setup_develop_no_requirements_update() {
-    local project_dir=$1
-
-    pip_install -e $project_dir
-    # ensure that further actions can do things like setup.py sdist
-    safe_chown -R $STACK_USER $1/*.egg-info
-}
-
-
-# Service wrapper to start services
-# start_service service-name
-function start_service() {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 start
-    else
-        sudo /sbin/service $1 start
-    fi
-}
-
-
-# Service wrapper to stop services
-# stop_service service-name
-function stop_service() {
-    if is_ubuntu; then
-        sudo /usr/sbin/service $1 stop
-    else
-        sudo /sbin/service $1 stop
-    fi
-}
-
-
-# Normalize config values to True or False
-# Accepts as False: 0 no No NO false False FALSE
-# Accepts as True: 1 yes Yes YES true True TRUE
-# VAR=$(trueorfalse default-value test-value)
-function trueorfalse() {
-    local default=$1
-    local testval=$2
-
-    [[ -z "$testval" ]] && { echo "$default"; return; }
-    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
-    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
-    echo "$default"
-}
-
 
 # Retrieve an image from a URL and upload into Glance.
 # Uses the following variables:
@@ -1387,7 +47,7 @@
 # - ``GLANCE_HOSTPORT``
 #
 # upload_image image-url glance-token
-function upload_image() {
+function upload_image {
     local image_url=$1
     local token=$2
 
@@ -1675,60 +335,19 @@
 }
 
 
-# Toggle enable/disable_service for services that must run exclusive of each other
-#  $1 The name of a variable containing a space-separated list of services
-#  $2 The name of a variable in which to store the enabled service's name
-#  $3 The name of the service to enable
-function use_exclusive_service {
-    local options=${!1}
-    local selection=$3
-    out=$2
-    [ -z $selection ] || [[ ! "$options" =~ "$selection" ]] && return 1
-    for opt in $options;do
-        [[ "$opt" = "$selection" ]] && enable_service $opt || disable_service $opt
-    done
-    eval "$out=$selection"
-    return 0
-}
-
-
 # Wait for an HTTP server to start answering requests
 # wait_for_service timeout url
-function wait_for_service() {
+function wait_for_service {
     local timeout=$1
     local url=$2
     timeout $timeout sh -c "while ! curl --noproxy '*' -s $url >/dev/null; do sleep 1; done"
 }
 
 
-# Wrapper for ``yum`` to set proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy``
-# yum_install package [package ...]
-function yum_install() {
-    [[ "$OFFLINE" = "True" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        no_proxy=$no_proxy \
-        yum install -y "$@"
-}
-
-
-# zypper wrapper to set arguments correctly
-# zypper_install package [package ...]
-function zypper_install() {
-    [[ "$OFFLINE" = "True" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
-}
-
-
 # ping check
 # Uses globals ``ENABLED_SERVICES``
 # ping_check from-net ip boot-timeout expected
-function ping_check() {
+function ping_check {
     if is_service_enabled neutron; then
         _ping_check_neutron  "$1" $2 $3 $4
         return
@@ -1738,7 +357,7 @@
 
 # ping check for nova
 # Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK``
-function _ping_check_novanet() {
+function _ping_check_novanet {
     local from_net=$1
     local ip=$2
     local boot_timeout=$3
@@ -1763,7 +382,7 @@
 }
 
 # Get ip of instance
-function get_instance_ip(){
+function get_instance_ip {
     local vm_id=$1
     local network_name=$2
     local nova_result="$(nova show $vm_id)"
@@ -1778,7 +397,7 @@
 # ssh check
 
 # ssh_check net-name key-file floating-ip default-user active-timeout
-function ssh_check() {
+function ssh_check {
     if is_service_enabled neutron; then
         _ssh_check_neutron  "$1" $2 $3 $4 $5
         return
@@ -1786,7 +405,7 @@
     _ssh_check_novanet "$1" $2 $3 $4 $5
 }
 
-function _ssh_check_novanet() {
+function _ssh_check_novanet {
     local NET_NAME=$1
     local KEY_FILE=$2
     local FLOATING_IP=$3
@@ -1799,60 +418,19 @@
 }
 
 
-# Add a user to a group.
-# add_user_to_group user group
-function add_user_to_group() {
-    local user=$1
-    local group=$2
-
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    # SLE11 and openSUSE 12.2 don't have the usual usermod
-    if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
-        sudo usermod -a -G "$group" "$user"
-    else
-        sudo usermod -A "$group" "$user"
-    fi
-}
-
-
-# Get the path to the direcotry where python executables are installed.
-# get_python_exec_prefix
-function get_python_exec_prefix() {
-    if is_fedora || is_suse; then
-        echo "/usr/bin"
-    else
-        echo "/usr/local/bin"
-    fi
-}
-
-
 # Get the location of the $module-rootwrap executables, where module is cinder
 # or nova.
 # get_rootwrap_location module
-function get_rootwrap_location() {
+function get_rootwrap_location {
     local module=$1
 
     echo "$(get_python_exec_prefix)/$module-rootwrap"
 }
 
 
-# Get the path to the pip command.
-# get_pip_command
-function get_pip_command() {
-    which pip || which pip-python
-
-    if [ $? -ne 0 ]; then
-        die $LINENO "Unable to find pip; cannot continue"
-    fi
-}
-
-
 # Path permissions sanity check
 # check_path_perm_sanity path
-function check_path_perm_sanity() {
+function check_path_perm_sanity {
     # Ensure no element of the path has 0700 permissions, which is very
     # likely to cause issues for daemons.  Inspired by default 0700
     # homedir permissions on RHEL and common practice of making DEST in
@@ -1923,7 +501,7 @@
 # The above will return "0", as the versions are equal.
 #
 # vercmp_numbers ver1 ver2
-vercmp_numbers() {
+function vercmp_numbers {
     typeset v1=$1 v2=$2 sep
     typeset -a ver1 ver2
 
@@ -1934,37 +512,6 @@
 }
 
 
-# ``policy_add policy_file policy_name policy_permissions``
-#
-# Add a policy to a policy.json file
-# Do nothing if the policy already exists
-
-function policy_add() {
-    local policy_file=$1
-    local policy_name=$2
-    local policy_perm=$3
-
-    if grep -q ${policy_name} ${policy_file}; then
-        echo "Policy ${policy_name} already exists in ${policy_file}"
-        return
-    fi
-
-    # Add a terminating comma to policy lines without one
-    # Remove the closing '}' and all lines following to the end-of-file
-    local tmpfile=$(mktemp)
-    uniq ${policy_file} | sed -e '
-        s/]$/],/
-        /^[}]/,$d
-    ' > ${tmpfile}
-
-    # Append policy and closing brace
-    echo "    \"${policy_name}\": ${policy_perm}" >>${tmpfile}
-    echo "}" >>${tmpfile}
-
-    mv ${tmpfile} ${policy_file}
-}
-
-
 # This function sets log formatting options for colorizing log
 # output to stdout. It is meant to be called by lib modules.
 # The last two parameters are optional and can be used to specify
@@ -1972,7 +519,7 @@
 # Defaults are respectively 'project_name' and 'user_name'
 #
 # setup_colorized_logging something.conf SOMESECTION
-function setup_colorized_logging() {
+function setup_colorized_logging {
     local conf_file=$1
     local conf_section=$2
     local project_var=${3:-"project_name"}
@@ -1984,10 +531,10 @@
     iniset $conf_file $conf_section logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
 }
 
+
 # Restore xtrace
 $XTRACE
 
-
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/functions-common b/functions-common
new file mode 100644
index 0000000..0db3ff3
--- /dev/null
+++ b/functions-common
@@ -0,0 +1,1591 @@
+# functions-common - Common functions used by DevStack components
+#
+# The canonical copy of this file is maintained in the DevStack repo.
+# All modifications should be made there and then sync'ed to other repos
+# as required.
+#
+# This file is sorted alphabetically within the function groups.
+#
+# - Config Functions
+# - Control Functions
+# - Distro Functions
+# - Git Functions
+# - OpenStack Functions
+# - Package Functions
+# - Process Functions
+# - Python Functions
+# - Service Functions
+# - System Functions
+#
+# The following variables are assumed to be defined by certain functions:
+#
+# - ``ENABLED_SERVICES``
+# - ``ERROR_ON_CLONE``
+# - ``FILES``
+# - ``OFFLINE``
+# - ``PIP_DOWNLOAD_CACHE``
+# - ``PIP_USE_MIRRORS``
+# - ``RECLONE``
+# - ``REQUIREMENTS_DIR``
+# - ``STACK_USER``
+# - ``TRACK_DEPENDS``
+# - ``UNDO_REQUIREMENTS``
+# - ``http_proxy``, ``https_proxy``, ``no_proxy``
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Config Functions
+# ================
+
+# Append a new option in an ini file without replacing the old value
+# iniadd config-file section option value1 value2 value3 ...
+function iniadd {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+    local values="$(iniget_multiline $file $section $option) $@"
+    iniset_multiline $file $section $option $values
+    $xtrace
+}
+
+# Comment an option in an INI file
+# inicomment config-file section option
+function inicomment {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+    $xtrace
+}
+
+# Get an option from an INI file
+# iniget config-file section option
+function iniget {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    echo ${line#*=}
+    $xtrace
+}
+
+# Get a multiple line option from an INI file
+# iniget_multiline config-file section option
+function iniget_multiline {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local values
+    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
+    echo ${values}
+    $xtrace
+}
+
+# Determine if the given option is present in the INI file
+# ini_has_option config-file section option
+function ini_has_option {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    $xtrace
+    [ -n "$line" ]
+}
+
+# Set an option in an INI file
+# iniset config-file section option value
+function iniset {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local value=$4
+
+    # Nothing to do without both a section and an option name
+    [[ -z $section || -z $option ]] && return
+
+    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    fi
+    if ! ini_has_option "$file" "$section" "$option"; then
+        # Add it
+        sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" "$file"
+    else
+        # Use \x01 as the sed delimiter so values containing '/' (e.g. paths)
+        # do not break the substitution
+        local sep=$(echo -ne "\x01")
+        # Replace it
+        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+    fi
+    $xtrace
+}
+
+# Set a multiple line option in an INI file
+# iniset_multiline config-file section option value1 value2 value3 ...
+function iniset_multiline {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+    local values
+    for v in $@; do
+        # The later sed command inserts each new value in the line next to
+        # the section identifier, which causes the values to be inserted in
+        # the reverse order. Do a reverse here to keep the original order.
+        values="$v ${values}"
+    done
+    if ! grep -q "^\[$section\]" "$file"; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    else
+        # Remove old values so the option is fully replaced, not appended to
+        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+    fi
+    # Add new ones, one "option = value" line per value
+    for v in $values; do
+        sed -i -e "/^\[$section\]/ a\\
+$option = $v
+" "$file"
+    done
+    $xtrace
+}
+
+# Uncomment an option in an INI file
+# iniuncomment config-file section option
+function iniuncomment {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+    $xtrace
+}
+
+# Normalize config values to True or False
+# Accepts as False: 0 no No NO false False FALSE
+# Accepts as True: 1 yes Yes YES true True TRUE
+# VAR=$(trueorfalse default-value test-value)
+function trueorfalse {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local default=$1
+    local testval=$2
+
+    [[ -z "$testval" ]] && { echo "$default"; return; }
+    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
+    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
+    echo "$default"
+    $xtrace
+}
+
+
+# Control Functions
+# =================
+
+# Prints backtrace info
+# filename:lineno:function
+# backtrace level
+function backtrace {
+    local level=$1
+    # Walk the call stack from the outermost frame down to $level,
+    # printing file:line:function for each frame
+    local deep=$((${#BASH_SOURCE[@]} - 1))
+    echo "[Call Trace]"
+    while [ $level -le $deep ]; do
+        echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
+        deep=$((deep - 1))
+    done
+}
+
+# Prints line number and "message" then exits
+# die $LINENO "message"
+function die {
+    # Capture $? first so the failing command's status can be propagated
+    local exitcode=$?
+    set +o xtrace
+    local line=$1; shift
+    # Never exit 0 from an error path
+    if [ $exitcode == 0 ]; then
+        exitcode=1
+    fi
+    backtrace 2
+    err $line "$*"
+    # Give buffers a second to flush
+    sleep 1
+    exit $exitcode
+}
+
+# Checks an environment variable is not set or has length 0 OR if the
+# exit code is non-zero and prints "message" and exits
+# NOTE: env-var is the variable name without a '$'
+# die_if_not_set $LINENO env-var "message"
+function die_if_not_set {
+    local exitcode=$?
+    # NOTE(review): FXTRACE is not declared 'local' (unlike the sibling
+    # helpers' xtrace variables) and so leaks into the global scope -- confirm
+    FXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local line=$1; shift
+    local evar=$1; shift
+    # Die either if the named variable is unset/empty or if the previous
+    # command already failed
+    if ! is_set $evar || [ $exitcode != 0 ]; then
+        die $line "$*"
+    fi
+    $FXTRACE
+}
+
+# Prints line number and "message" in error format
+# err $LINENO "message"
+function err {
+    # Preserve the caller's exit status so it can be returned unchanged
+    local exitcode=$?
+    # NOTE(review): errXTRACE is not 'local' and leaks globally -- confirm
+    errXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
+    echo $msg 1>&2;
+    # Also persist the message when screen logging is configured
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
+    $errXTRACE
+    return $exitcode
+}
+
+# Checks an environment variable is not set or has length 0 OR if the
+# exit code is non-zero and prints "message"
+# NOTE: env-var is the variable name without a '$'
+# err_if_not_set $LINENO env-var "message"
+function err_if_not_set {
+    local exitcode=$?
+    errinsXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local line=$1; shift
+    local evar=$1; shift
+    # Like die_if_not_set, but reports the error without exiting
+    if ! is_set $evar || [ $exitcode != 0 ]; then
+        err $line "$*"
+    fi
+    $errinsXTRACE
+    return $exitcode
+}
+
+# Exit after outputting a message about the distribution not being supported.
+# exit_distro_not_supported [optional-string-telling-what-is-missing]
+function exit_distro_not_supported {
+    # Lazily resolve DISTRO so callers need not have run GetDistro first
+    if [[ -z "$DISTRO" ]]; then
+        GetDistro
+    fi
+
+    if [ $# -gt 0 ]; then
+        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
+    else
+        die $LINENO "Support for $DISTRO is incomplete."
+    fi
+}
+
+# Test if the named environment variable is set and not zero length
+# is_set env-var
+function is_set {
+    local var=\$"$1"
+    eval "[ -n \"$var\" ]" # For ex.: sh -c "[ -n \"$var\" ]" would be better, but several exercises depends on this
+}
+
+# Prints line number and "message" in warning format
+# warn $LINENO "message"
+function warn {
+    local exitcode=$?
+    errXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    # Same format and log sink as err(), but tagged WARNING and never fatal
+    local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
+    echo $msg 1>&2;
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
+    $errXTRACE
+    return $exitcode
+}
+
+
+# Distro Functions
+# ================
+
+# Determine OS Vendor, Release and Update
+# Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
+# Returns results in global variables:
+# os_VENDOR - vendor name
+# os_RELEASE - release
+# os_UPDATE - update
+# os_PACKAGE - package type
+# os_CODENAME - vendor's codename for release
+# GetOSVersion
+function GetOSVersion {
+    # Figure out which vendor we are
+    if [[ -x "`which sw_vers 2>/dev/null`" ]]; then
+        # OS/X
+        os_VENDOR=`sw_vers -productName`
+        os_RELEASE=`sw_vers -productVersion`
+        os_UPDATE=${os_RELEASE##*.}
+        os_RELEASE=${os_RELEASE%.*}
+        os_PACKAGE=""
+        if [[ "$os_RELEASE" =~ "10.7" ]]; then
+            os_CODENAME="lion"
+        elif [[ "$os_RELEASE" =~ "10.6" ]]; then
+            os_CODENAME="snow leopard"
+        elif [[ "$os_RELEASE" =~ "10.5" ]]; then
+            os_CODENAME="leopard"
+        elif [[ "$os_RELEASE" =~ "10.4" ]]; then
+            os_CODENAME="tiger"
+        elif [[ "$os_RELEASE" =~ "10.3" ]]; then
+            os_CODENAME="panther"
+        else
+            os_CODENAME=""
+        fi
+    elif [[ -x $(which lsb_release 2>/dev/null) ]]; then
+        # lsb_release is the preferred source when available
+        os_VENDOR=$(lsb_release -i -s)
+        os_RELEASE=$(lsb_release -r -s)
+        os_UPDATE=""
+        # Default to rpm, then correct it below for deb-based vendors
+        os_PACKAGE="rpm"
+        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
+            os_PACKAGE="deb"
+        elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
+            lsb_release -d -s | grep -q openSUSE
+            if [[ $? -eq 0 ]]; then
+                os_VENDOR="openSUSE"
+            fi
+        elif [[ $os_VENDOR == "openSUSE project" ]]; then
+            os_VENDOR="openSUSE"
+        elif [[ $os_VENDOR =~ Red.*Hat ]]; then
+            os_VENDOR="Red Hat"
+        fi
+        os_CODENAME=$(lsb_release -c -s)
+    elif [[ -r /etc/redhat-release ]]; then
+        # Red Hat Enterprise Linux Server release 5.5 (Tikanga)
+        # Red Hat Enterprise Linux Server release 7.0 Beta (Maipo)
+        # CentOS release 5.5 (Final)
+        # CentOS Linux release 6.0 (Final)
+        # Fedora release 16 (Verne)
+        # XenServer release 6.2.0-70446c (xenenterprise)
+        os_CODENAME=""
+        for r in "Red Hat" CentOS Fedora XenServer; do
+            os_VENDOR=$r
+            if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
+                ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
+                os_CODENAME=${ver#*|}
+                os_RELEASE=${ver%|*}
+                os_UPDATE=${os_RELEASE##*.}
+                os_RELEASE=${os_RELEASE%.*}
+                break
+            fi
+            os_VENDOR=""
+        done
+        os_PACKAGE="rpm"
+    elif [[ -r /etc/SuSE-release ]]; then
+        for r in openSUSE "SUSE Linux"; do
+            if [[ "$r" = "SUSE Linux" ]]; then
+                os_VENDOR="SUSE LINUX"
+            else
+                os_VENDOR=$r
+            fi
+
+            if [[ -n "`grep \"$r\" /etc/SuSE-release`" ]]; then
+                os_CODENAME=`grep "CODENAME = " /etc/SuSE-release | sed 's:.* = ::g'`
+                os_RELEASE=`grep "VERSION = " /etc/SuSE-release | sed 's:.* = ::g'`
+                os_UPDATE=`grep "PATCHLEVEL = " /etc/SuSE-release | sed 's:.* = ::g'`
+                break
+            fi
+            os_VENDOR=""
+        done
+        os_PACKAGE="rpm"
+    # If lsb_release is not installed, we should be able to detect Debian OS
+    elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
+        os_VENDOR="Debian"
+        os_PACKAGE="deb"
+        os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
+        os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
+    fi
+    # Publish the results for other functions (is_ubuntu, is_fedora, ...)
+    export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
+}
+
+# Translate the OS version values into common nomenclature
+# Sets global ``DISTRO`` from the ``os_*`` values
+function GetDistro {
+    GetOSVersion
+    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
+        # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
+        DISTRO=$os_CODENAME
+    elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
+        # For Fedora, just use 'f' and the release
+        DISTRO="f$os_RELEASE"
+    elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
+        DISTRO="opensuse-$os_RELEASE"
+    elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
+        # For SLE, also use the service pack
+        if [[ -z "$os_UPDATE" ]]; then
+            DISTRO="sle${os_RELEASE}"
+        else
+            DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
+        fi
+    elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
+        # Drop the . release as we assume it's compatible
+        DISTRO="rhel${os_RELEASE::1}"
+    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
+        DISTRO="xs$os_RELEASE"
+    else
+        # Catch-all for now is Vendor + Release + Update
+        DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
+    fi
+    # Make DISTRO visible to subprocesses (e.g. package list parsing)
+    export DISTRO
+}
+
+# Utility function for checking machine architecture
+# is_arch arch-type
+function is_arch {
+    ARCH_TYPE=$1
+
+    [[ "$(uname -m)" == "$ARCH_TYPE" ]]
+}
+
+# Determine if current distribution is a Fedora-based distribution
+# (Fedora, RHEL, CentOS, etc).
+# is_fedora
+function is_fedora {
+    # Lazily populate the os_* globals on first use
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || [ "$os_VENDOR" = "CentOS" ]
+}
+
+
+# Determine if current distribution is a SUSE-based distribution
+# (openSUSE, SLE).
+# is_suse
+function is_suse {
+    # Lazily populate the os_* globals on first use
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
+}
+
+
+# Determine if current distribution is an Ubuntu-based distribution
+# It will also detect non-Ubuntu but Debian-based distros
+# is_ubuntu
+function is_ubuntu {
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    # Keyed off the package type, so any deb-based distro matches
+    [ "$os_PACKAGE" = "deb" ]
+}
+
+
+# Git Functions
+# =============
+
+# Returns openstack release name for a given branch name
+# ``get_release_name_from_branch branch-name``
+function get_release_name_from_branch {
+    local branch=$1
+    # "stable/havana" -> "havana"; anything else maps to "master"
+    if [[ $branch =~ "stable/" ]]; then
+        echo ${branch#*/}
+    else
+        echo "master"
+    fi
+}
+
+# git clone only if directory doesn't exist already.  Since ``DEST`` might not
+# be owned by the installation user, we create the directory and change the
+# ownership to the proper user.
+# Set global RECLONE=yes to simulate a clone when dest-dir exists
+# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
+# does not exist (default is False, meaning the repo will be cloned).
+# Uses global ``OFFLINE``
+# git_clone remote dest-dir branch
+function git_clone {
+    # NOTE(review): GIT_REMOTE/GIT_DEST/GIT_REF are intentionally(?) global
+    # here, not 'local' -- confirm nothing depends on them after the call
+    GIT_REMOTE=$1
+    GIT_DEST=$2
+    GIT_REF=$3
+    RECLONE=$(trueorfalse False $RECLONE)
+
+    if [[ "$OFFLINE" = "True" ]]; then
+        echo "Running in offline mode, clones already exist"
+        # print out the results so we know what change was used in the logs
+        cd $GIT_DEST
+        git show --oneline | head -1
+        return
+    fi
+
+    if echo $GIT_REF | egrep -q "^refs"; then
+        # If our branch name is a gerrit style refs/changes/...
+        if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && \
+                die $LINENO "Cloning not allowed in this configuration"
+            git_timed clone $GIT_REMOTE $GIT_DEST
+        fi
+        cd $GIT_DEST
+        # Gerrit refs are fetched directly; FETCH_HEAD is then checked out
+        git_timed fetch $GIT_REMOTE $GIT_REF && git checkout FETCH_HEAD
+    else
+        # do a full clone only if the directory doesn't exist
+        if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && \
+                die $LINENO "Cloning not allowed in this configuration"
+            git_timed clone $GIT_REMOTE $GIT_DEST
+            cd $GIT_DEST
+            # This checkout syntax works for both branches and tags
+            git checkout $GIT_REF
+        elif [[ "$RECLONE" = "True" ]]; then
+            # if it does exist then simulate what clone does if asked to RECLONE
+            cd $GIT_DEST
+            # set the url to pull from and fetch
+            git remote set-url origin $GIT_REMOTE
+            git_timed fetch origin
+            # remove the existing ignored files (like pyc) as they cause breakage
+            # (due to the py files having older timestamps than our pyc, so python
+            # thinks the pyc files are correct using them)
+            find $GIT_DEST -name '*.pyc' -delete
+
+            # handle GIT_REF accordingly to type (tag, branch)
+            if [[ -n "`git show-ref refs/tags/$GIT_REF`" ]]; then
+                git_update_tag $GIT_REF
+            elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
+                git_update_branch $GIT_REF
+            elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
+                git_update_remote_branch $GIT_REF
+            else
+                die $LINENO "$GIT_REF is neither branch nor tag"
+            fi
+
+        fi
+    fi
+
+    # print out the results so we know what change was used in the logs
+    cd $GIT_DEST
+    git show --oneline | head -1
+}
+
+# git can sometimes get itself infinitely stuck with transient network
+# errors or other issues with the remote end.  This wraps git in a
+# timeout/retry loop and is intended to watch over non-local git
+# processes that might hang.  GIT_TIMEOUT, if set, is passed directly
+# to timeout(1); otherwise the default value of 0 maintains the status
+# quo of waiting forever.
+# usage: git_timed <git-command>
+function git_timed {
+    local count=0
+    # 0 means "no timeout" to timeout(1): wait forever, preserving old behavior
+    local timeout=0
+
+    if [[ -n "${GIT_TIMEOUT}" ]]; then
+        timeout=${GIT_TIMEOUT}
+    fi
+
+    until timeout -s SIGINT ${timeout} git "$@"; do
+        # $? here is the exit status of the timeout/git invocation above.
+        # 124 is timeout(1)'s special return code when it reached the
+        # timeout; otherwise assume fatal failure
+        if [[ $? -ne 124 ]]; then
+            die $LINENO "git call failed: [git $@]"
+        fi
+
+        count=$(($count + 1))
+        warn "timeout ${count} for git call: [git $@]"
+        if [ $count -eq 3 ]; then
+            die $LINENO "Maximum of 3 git retries reached"
+        fi
+        sleep 5
+    done
+}
+
+# git update using reference as a branch.
+# git_update_branch ref
+function git_update_branch {
+
+    GIT_BRANCH=$1
+
+    git checkout -f origin/$GIT_BRANCH
+    # a local branch might not exist
+    git branch -D $GIT_BRANCH || true
+    git checkout -b $GIT_BRANCH
+}
+
+# git update using reference as a branch.
+# git_update_remote_branch ref
+function git_update_remote_branch {
+
+    GIT_BRANCH=$1
+
+    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
+}
+
+# git update using reference as a tag. Be careful editing source at that repo
+# as working copy will be in a detached mode
+# git_update_tag ref
+function git_update_tag {
+
+    GIT_TAG=$1
+
+    git tag -d $GIT_TAG
+    # fetching given tag only
+    git_timed fetch origin tag $GIT_TAG
+    git checkout -f $GIT_TAG
+}
+
+
+# OpenStack Functions
+# ===================
+
+# Get the default value for HOST_IP
+# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
+function get_default_host_ip {
+    local fixed_range=$1
+    local floating_range=$2
+    local host_ip_iface=$3
+    local host_ip=$4
+
+    # Find the interface used for the default route
+    host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
+    # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
+    if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
+        host_ip=""
+        # Collect every IPv4 address bound to the chosen interface
+        host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
+        for IP in $host_ips; do
+            # Attempt to filter out IP addresses that are part of the fixed and
+            # floating range. Note that this method only works if the ``netaddr``
+            # python library is installed. If it is not installed, an error
+            # will be printed and the first IP from the interface will be used.
+            # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
+            # address.
+            if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
+                host_ip=$IP
+                break;
+            fi
+        done
+    fi
+    echo $host_ip
+}
+
+# Grab a numbered field from python prettytable output
+# Fields are numbered starting with 1
+# Reverse syntax is supported: -1 is the last field, -2 is second to last, etc.
+# get_field field-number
+function get_field {
+    # Reads table rows on stdin; awk splits on '|' (with surrounding spaces)
+    while read data; do
+        if [ "$1" -lt 0 ]; then
+            # Negative index: count from the last field (NF)
+            field="(\$(NF$1))"
+        else
+            # +1 skips the empty field before the leading '|' of the row
+            field="\$$(($1 + 1))"
+        fi
+        echo "$data" | awk -F'[ \t]*\\|[ \t]*' "{print $field}"
+    done
+}
+
+# Add a policy to a policy.json file
+# Do nothing if the policy already exists
+# ``policy_add policy_file policy_name policy_permissions``
+function policy_add {
+    local policy_file=$1
+    local policy_name=$2
+    local policy_perm=$3
+
+    if grep -q ${policy_name} ${policy_file}; then
+        echo "Policy ${policy_name} already exists in ${policy_file}"
+        return
+    fi
+
+    # Add a terminating comma to policy lines without one
+    # Remove the closing '}' and all lines following to the end-of-file
+    local tmpfile=$(mktemp)
+    uniq ${policy_file} | sed -e '
+        s/]$/],/
+        /^[}]/,$d
+    ' > ${tmpfile}
+
+    # Append policy and closing brace
+    echo "    \"${policy_name}\": ${policy_perm}" >>${tmpfile}
+    echo "}" >>${tmpfile}
+
+    # Atomically replace the original policy file
+    mv ${tmpfile} ${policy_file}
+}
+
+
+# Package Functions
+# =================
+
+# _get_package_dir
+function _get_package_dir {
+    # Echo the per-distro prerequisite package directory under $FILES
+    local pkg_dir
+    if is_ubuntu; then
+        pkg_dir=$FILES/apts
+    elif is_fedora; then
+        pkg_dir=$FILES/rpms
+    elif is_suse; then
+        pkg_dir=$FILES/rpms-suse
+    else
+        exit_distro_not_supported "list of packages"
+    fi
+    echo "$pkg_dir"
+}
+
+# Wrapper for ``apt-get`` to set cache and proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# apt_get operation package [package ...]
+function apt_get {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    [[ "$OFFLINE" = "True" || -z "$@" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+
+    $xtrace
+    $sudo DEBIAN_FRONTEND=noninteractive \
+        http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
+        apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+}
+
+# get_packages() collects a list of package names of any type from the
+# prerequisite files in ``files/{apts|rpms}``.  The list is intended
+# to be passed to a package installer such as apt or yum.
+#
+# Only packages required for the services in 1st argument will be
+# included.  Two bits of metadata are recognized in the prerequisite files:
+#
+# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
+# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
+#   of the package to the distros listed.  The distro names are case insensitive.
+function get_packages {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local services=$@
+    local package_dir=$(_get_package_dir)
+    local file_to_parse
+    local service
+
+    if [[ -z "$package_dir" ]]; then
+        echo "No package directory supplied"
+        return 1
+    fi
+    if [[ -z "$DISTRO" ]]; then
+        GetDistro
+        echo "Found Distro $DISTRO"
+    fi
+    # Map each enabled service name to the prerequisite file(s) to read
+    for service in ${services//,/ }; do
+        # Allow individual services to specify dependencies
+        if [[ -e ${package_dir}/${service} ]]; then
+            file_to_parse="${file_to_parse} $service"
+        fi
+        # NOTE(sdague) n-api needs glance for now because that's where
+        # glance client is
+        if [[ $service == n-api ]]; then
+            if [[ ! $file_to_parse =~ nova ]]; then
+                file_to_parse="${file_to_parse} nova"
+            fi
+            if [[ ! $file_to_parse =~ glance ]]; then
+                file_to_parse="${file_to_parse} glance"
+            fi
+        elif [[ $service == c-* ]]; then
+            if [[ ! $file_to_parse =~ cinder ]]; then
+                file_to_parse="${file_to_parse} cinder"
+            fi
+        elif [[ $service == ceilometer-* ]]; then
+            if [[ ! $file_to_parse =~ ceilometer ]]; then
+                file_to_parse="${file_to_parse} ceilometer"
+            fi
+        elif [[ $service == s-* ]]; then
+            if [[ ! $file_to_parse =~ swift ]]; then
+                file_to_parse="${file_to_parse} swift"
+            fi
+        elif [[ $service == n-* ]]; then
+            if [[ ! $file_to_parse =~ nova ]]; then
+                file_to_parse="${file_to_parse} nova"
+            fi
+        elif [[ $service == g-* ]]; then
+            if [[ ! $file_to_parse =~ glance ]]; then
+                file_to_parse="${file_to_parse} glance"
+            fi
+        elif [[ $service == key* ]]; then
+            if [[ ! $file_to_parse =~ keystone ]]; then
+                file_to_parse="${file_to_parse} keystone"
+            fi
+        elif [[ $service == q-* ]]; then
+            if [[ ! $file_to_parse =~ neutron ]]; then
+                file_to_parse="${file_to_parse} neutron"
+            fi
+        fi
+    done
+
+    # Parse each prerequisite file line by line, honoring the metadata tags
+    for file in ${file_to_parse}; do
+        local fname=${package_dir}/${file}
+        local OIFS line package distros distro
+        [[ -e $fname ]] || continue
+
+        OIFS=$IFS
+        IFS=$'\n'
+        for line in $(<${fname}); do
+            if [[ $line =~ "NOPRIME" ]]; then
+                continue
+            fi
+
+            # Assume we want this package
+            package=${line%#*}
+            inst_pkg=1
+
+            # Look for # dist:xxx in comment
+            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
+                # We are using BASH regexp matching feature.
+                package=${BASH_REMATCH[1]}
+                distros=${BASH_REMATCH[2]}
+                # In bash ${VAR,,} will lowercase VAR
+                # Look for a match in the distro list
+                if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
+                    # If no match then skip this package
+                    inst_pkg=0
+                fi
+            fi
+
+            # Look for # testonly in comment
+            if [[ $line =~ (.*)#.*testonly.* ]]; then
+                package=${BASH_REMATCH[1]}
+                # Are we installing test packages? (test for the default value)
+                if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then
+                    # If not installing test packages the skip this package
+                    inst_pkg=0
+                fi
+            fi
+
+            if [[ $inst_pkg = 1 ]]; then
+                echo $package
+            fi
+        done
+        IFS=$OIFS
+    done
+    $xtrace
+}
+
+# Distro-agnostic package installer
+# install_package package [package ...]
+function install_package {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    if is_ubuntu; then
+        # if there are transient errors pulling the updates, that's fine. It may
+        # be secondary repositories that we don't really care about.
+        [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update || /bin/true
+        # Only refresh the repo index once per run
+        NO_UPDATE_REPOS=True
+
+        $xtrace
+        apt_get install "$@"
+    elif is_fedora; then
+        $xtrace
+        yum_install "$@"
+    elif is_suse; then
+        $xtrace
+        zypper_install "$@"
+    else
+        $xtrace
+        exit_distro_not_supported "installing packages"
+    fi
+}
+
+# Distro-agnostic function to tell if a package is installed
+# is_package_installed package [package ...]
+function is_package_installed {
+    # No arguments means nothing is installed
+    if [[ -z "$@" ]]; then
+        return 1
+    fi
+
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+
+    # Exit status comes from the underlying package query
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        dpkg -s "$@" > /dev/null 2> /dev/null
+    elif [[ "$os_PACKAGE" = "rpm" ]]; then
+        rpm --quiet -q "$@"
+    else
+        exit_distro_not_supported "finding if a package is installed"
+    fi
+}
+
+# Distro-agnostic package uninstaller
+# uninstall_package package [package ...]
+function uninstall_package {
+    # Dispatch to the distro's native removal command
+    if is_ubuntu; then
+        apt_get purge "$@"
+    elif is_fedora; then
+        sudo yum remove -y "$@"
+    elif is_suse; then
+        sudo zypper rm "$@"
+    else
+        exit_distro_not_supported "uninstalling packages"
+    fi
+}
+
+# Wrapper for ``yum`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
+# yum_install package [package ...]
+function yum_install {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        no_proxy=$no_proxy \
+        yum install -y "$@"
+}
+
+# zypper wrapper to set arguments correctly
+# zypper_install package [package ...]
+function zypper_install {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        zypper --non-interactive install --auto-agree-with-licenses "$@"
+}
+
+
+# Process Functions
+# =================
+
+# _run_process() is designed to be backgrounded by run_process() to simulate a
+# fork.  It includes the dirty work of closing extra filehandles and preparing log
+# files to produce the same logs as screen_it().  The log filename is derived
+# from the service name and global-and-now-misnamed SCREEN_LOGDIR
+# _run_process service "command-line"
+function _run_process {
+    local service=$1
+    local command="$2"
+
+    # Undo logging redirections and close the extra descriptors
+    exec 1>&3
+    exec 2>&3
+    exec 3>&-
+    exec 6>&-
+
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        # NOTE(review): '1>&<path>' looks like it was meant to be '1>>' (append
+        # to the log file); confirm this redirection works as intended
+        exec 1>&${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log 2>&1
+        ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+
+        # TODO(dtroyer): Hack to get stdout from the Python interpreter for the logs.
+        export PYTHONUNBUFFERED=1
+    fi
+
+    # Only reached if exec fails to start the service command
+    exec /bin/bash -c "$command"
+    # NOTE(review): die() expects a line number as its first argument;
+    # $LINENO appears to be missing here
+    die "$service exec failure: $command"
+}
+
# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
# This is used for ``service_check`` when all the ``screen_it`` are called finished
# Uses (and defaults) globals ``SCREEN_NAME``, ``SERVICE_DIR``
# init_service_check
function init_service_check {
    SCREEN_NAME=${SCREEN_NAME:-stack}
    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}

    local status_dir="$SERVICE_DIR/$SCREEN_NAME"
    # Make sure the status directory exists, then drop stale failure flags
    mkdir -p "$status_dir"
    rm -f "$status_dir"/*.failure
}
+
# Find out if a process exists by partial name.
# Returns 0 when a matching process is found, non-zero otherwise.
# is_running name
function is_running {
    local target=$1
    local listing
    # Exclude the grep processes themselves before matching
    listing=$(ps auxw | grep -v grep)
    echo "$listing" | grep ${target} > /dev/null
}
+
# run_process() launches a child process that closes all file descriptors and
# then exec's the passed in command.  This is meant to duplicate the semantics
# of screen_it() without screen.  PIDs are written to
# $SERVICE_DIR/$SCREEN_NAME/$service.pid
# run_process service "command-line"
function run_process {
    local service=$1
    local command="$2"

    # Spawn the child process
    # The PID of the backgrounded _run_process wrapper is echoed to stdout
    # so the caller can capture it (see screen_it's non-screen path)
    _run_process "$service" "$command" &
    echo $!
}
+
# Helper to launch a service in a named screen
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``,
# ``SCREEN_LOGDIR``, ``CURRENT_LOG_TIME``
# screen_it service "command-line"
function screen_it {
    SCREEN_NAME=${SCREEN_NAME:-stack}
    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
    USE_SCREEN=$(trueorfalse True $USE_SCREEN)

    if is_service_enabled $1; then
        # Append the service to the screen rc file
        screen_rc "$1" "$2"

        if [[ "$USE_SCREEN" = "True" ]]; then
            # Create a new named window in the existing session
            screen -S $SCREEN_NAME -X screen -t $1

            if [[ -n ${SCREEN_LOGDIR} ]]; then
                # Turn on screen logging and keep a stable symlink to the
                # timestamped log file
                screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
                screen -S $SCREEN_NAME -p $1 -X log on
                ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
            fi

            # sleep to allow bash to be ready to be send the command - we are
            # creating a new window in screen and then sends characters, so if
            # bash isn't running by the time we send the command, nothing happens
            sleep 1.5

            # Carriage return used to "press enter" in the stuffed window
            NL=`echo -ne '\015'`
            # This fun command does the following:
            # - the passed server command is backgrounded
            # - the pid of the background process is saved in the usual place
            # - the server process is brought back to the foreground
            # - if the server process exits prematurely the fg command errors
            #   and a message is written to stdout and the service failure file
            # The pid saved can be used in screen_stop() as a process group
            # id to kill off all child processes
            screen -S $SCREEN_NAME -p $1 -X stuff "$2 & echo \$! >$SERVICE_DIR/$SCREEN_NAME/$1.pid; fg || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
        else
            # Spawn directly without screen
            run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$1.pid
        fi
    fi
}
+
# Screen rc file builder
# Creates (once) and appends to ``$TOP_DIR/$SCREEN_NAME-screenrc`` so a
# session identical to the running one can be recreated (rejoin-stack.sh).
# Uses globals ``SCREEN_NAME``, ``TOP_DIR``, ``SCREEN_HARDSTATUS``,
# ``SCREEN_LOGDIR``, ``CURRENT_LOG_TIME``
# screen_rc service "command-line"
function screen_rc {
    SCREEN_NAME=${SCREEN_NAME:-stack}
    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
    if [[ ! -e $SCREENRC ]]; then
        # Name the screen session
        echo "sessionname $SCREEN_NAME" > $SCREENRC
        # Set a reasonable statusbar
        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
        # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
        echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
        echo "screen -t shell bash" >> $SCREENRC
    fi
    # If this service doesn't already exist in the screenrc file
    # (plain substring grep; service names are assumed not to collide)
    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
        # Carriage return so the stuffed command is "entered" in the window
        NL=`echo -ne '\015'`
        echo "screen -t $1 bash" >> $SCREENRC
        echo "stuff \"$2$NL\"" >> $SCREENRC

        if [[ -n ${SCREEN_LOGDIR} ]]; then
            echo "logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log" >>$SCREENRC
            echo "log on" >>$SCREENRC
        fi
    fi
}
+
# Stop a service in screen
# If a PID is available use it, kill the whole process group via TERM
# If screen is being used kill the screen window; this will catch processes
# that did not leave a PID behind
# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
# screen_stop service
function screen_stop {
    SCREEN_NAME=${SCREEN_NAME:-stack}
    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
    USE_SCREEN=$(trueorfalse True $USE_SCREEN)

    if is_service_enabled $1; then
        # Kill via pid if we have one available
        if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
            # ``pkill -P`` takes a parent PID and does not accept the
            # negative (process-group) form, so the old ``-P -<pid>``
            # invocation never matched anything.  The saved pid is the
            # process group leader (see screen_it), so match on the
            # process group to catch the service and all its children.
            pkill -TERM -g $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
            rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
        fi
        if [[ "$USE_SCREEN" = "True" ]]; then
            # Clean up the screen window
            screen -S $SCREEN_NAME -p $1 -X kill
        fi
    fi
}
+
# Helper to get the status of each running service
# Reports any service that left a ``*.failure`` flag file behind and dies
# if there was at least one failure.
# service_check
function service_check {
    local service
    local failed_flags
    SCREEN_NAME=${SCREEN_NAME:-stack}
    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}

    if [[ ! -d "$SERVICE_DIR/$SCREEN_NAME" ]]; then
        echo "No service status directory found"
        return
    fi

    # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME;
    # the listing may legitimately be empty, so make this -o errexit safe
    failed_flags=$(ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true)

    for service in $failed_flags; do
        service=$(basename "$service")
        echo "Error: Service ${service%.failure} is not running"
    done

    if [ -n "$failed_flags" ]; then
        die $LINENO "More details about the above errors can be found with screen, with ./rejoin-stack.sh"
    fi
}
+
+
+# Python Functions
+# ================
+
# Get the path to the pip command.
# Prints the resolved path on stdout; dies if neither name is found.
# get_pip_command
function get_pip_command {
    # ``which`` prints the path of the first binary found
    if ! which pip && ! which pip-python; then
        die $LINENO "Unable to find pip; cannot continue"
    fi
}
+
# Get the path to the directory where python executables are installed.
# get_python_exec_prefix
function get_python_exec_prefix {
    local prefix="/usr/local/bin"
    if is_fedora || is_suse; then
        # RPM-based distros install console scripts into /usr/bin
        prefix="/usr/bin"
    fi
    echo "$prefix"
}
+
# Wrapper for ``pip install`` to set cache and proxy environment variables
# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
# ``TRACK_DEPENDS``, ``*_proxy``
# pip_install package [package ...]
function pip_install {
    # Preserve the caller's xtrace setting while silencing the noisy body
    local xtrace=$(set +o | grep xtrace)
    set +o xtrace
    # Nothing to do offline or with no packages
    # NOTE(review): ``-z "$@"`` only really tests the first argument;
    # harmless here since any argument at all means "not empty"
    if [[ "$OFFLINE" = "True" || -z "$@" ]]; then
        $xtrace
        return
    fi

    if [[ -z "$os_PACKAGE" ]]; then
        GetOSVersion
    fi
    # When tracking dependencies, install into the venv without sudo
    if [[ $TRACK_DEPENDS = True ]]; then
        source $DEST/.venv/bin/activate
        CMD_PIP=$DEST/.venv/bin/pip
        SUDO_PIP="env"
    else
        SUDO_PIP="sudo"
        CMD_PIP=$(get_pip_command)
    fi

    # Mirror option not needed anymore because pypi has CDN available,
    # but it's useful in certain circumstances
    PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
    if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
        PIP_MIRROR_OPT="--use-mirrors"
    fi

    # pip < 1.4 has a bug where it will use an already existing build
    # directory unconditionally.  Say an earlier component installs
    # foo v1.1; pip will have built foo's source in
    # /tmp/$USER-pip-build.  Even if a later component specifies foo <
    # 1.1, the existing extracted build will be used and cause
    # confusing errors.  By creating unique build directories we avoid
    # this problem. See https://github.com/pypa/pip/issues/709
    local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)

    $xtrace
    # The build dir is only removed on success so failures can be inspected
    $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
        HTTP_PROXY=$http_proxy \
        HTTPS_PROXY=$https_proxy \
        NO_PROXY=$no_proxy \
        $CMD_PIP install --build=${pip_build_tmp} \
        $PIP_MIRROR_OPT $@ \
        && $SUDO_PIP rm -rf ${pip_build_tmp}
}
+
# ``pip install -e`` the package, which processes the dependencies
# using pip before running `setup.py develop`
#
# Updates the dependencies in project_dir from the
# openstack/requirements global list before installing anything.
#
# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS``
# setup_develop directory
function setup_develop {
    local project_dir=$1

    # Don't update repo if local changes exist
    # Don't use buggy "git diff --quiet"
    # ``errexit`` requires us to trap the exit code when the repo is changed
    local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")

    if [[ $update_requirements = "changed" ]]; then
        # Sync this project's requirements from the global requirements list
        (cd $REQUIREMENTS_DIR; \
            $SUDO_CMD python update.py $project_dir)
    fi

    setup_develop_no_requirements_update $project_dir

    # We've just gone and possibly modified the user's source tree in an
    # automated way, which is considered bad form if it's a development
    # tree because we've screwed up their next git checkin. So undo it.
    #
    # However... there are some circumstances, like running in the gate
    # where we really really want the overridden version to stick. So provide
    # a variable that tells us whether or not we should UNDO the requirements
    # changes (this will be set to False in the OpenStack ci gate)
    if [ $UNDO_REQUIREMENTS = "True" ]; then
        if [[ $update_requirements = "changed" ]]; then
            (cd $project_dir && git reset --hard)
        fi
    fi
}
+
+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running `setup.py develop`
+# Uses globals ``STACK_USER``
+# setup_develop_no_requirements_update directory
+function setup_develop_no_requirements_update {
+    local project_dir=$1
+
+    pip_install -e $project_dir
+    # ensure that further actions can do things like setup.py sdist
+    safe_chown -R $STACK_USER $1/*.egg-info
+}
+
+
+# Service Functions
+# =================
+
# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
# Collapses doubled commas, then trims a leading/trailing comma.
# _cleanup_service_list service-list
function _cleanup_service_list {
    local services="$1"
    echo "$services" | sed -e 's/,,/,/g' -e 's/^,//' -e 's/,$//'
}
+
# disable_all_services() removes all current services
# from ``ENABLED_SERVICES`` to reset the configuration
# before a minimal installation
# Uses global ``ENABLED_SERVICES``
# disable_all_services
function disable_all_services {
    # Empty the list; is_service_enabled will now match nothing
    ENABLED_SERVICES=
}
+
# Remove all services starting with '-'.  For example, to install all default
# services except rabbit (rabbit) set in ``localrc``:
# ENABLED_SERVICES+=",-rabbit"
# Uses global ``ENABLED_SERVICES``
# disable_negated_services
function disable_negated_services {
    local tmpsvcs="${ENABLED_SERVICES}"
    local service
    for service in ${tmpsvcs//,/ }; do
        if [[ ${service} == -* ]]; then
            # Strip every occurrence of the negated name (with or without
            # the leading '-') from the working list; extra commas left
            # behind are cleaned up below
            tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
        fi
    done
    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
}
+
# disable_service() removes the services passed as argument to the
# ``ENABLED_SERVICES`` list, if they are present.
#
# For example:
#   disable_service rabbit
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
# Uses global ``ENABLED_SERVICES``
# disable_service service [service ...]
function disable_service {
    # Pad with commas so every entry is ",name,"-delimited for the
    # pattern replacement below
    local tmpsvcs=",${ENABLED_SERVICES},"
    local service
    for service in $@; do
        if is_service_enabled $service; then
            tmpsvcs=${tmpsvcs//,$service,/,}
        fi
    done
    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
}
+
# enable_service() adds the services passed as argument to the
# ``ENABLED_SERVICES`` list, if they are not already present.
#
# For example:
#   enable_service qpid
#
# This function does not know about the special cases
# for nova, glance, and neutron built into is_service_enabled().
# Uses global ``ENABLED_SERVICES``
# enable_service service [service ...]
function enable_service {
    local tmpsvcs="${ENABLED_SERVICES}"
    # Declare the loop variable local for consistency with
    # disable_service()/disable_negated_services() and to avoid
    # leaking ``service`` into the caller's scope
    local service
    for service in $@; do
        if ! is_service_enabled $service; then
            tmpsvcs+=",$service"
        fi
    done
    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
    # Honor any "-service" negations that may now apply
    disable_negated_services
}
+
# is_service_enabled() checks if the service(s) specified as arguments are
# enabled by the user in ``ENABLED_SERVICES``.
#
# Multiple services specified as arguments are ``OR``'ed together; the test
# is a short-circuit boolean, i.e it returns on the first match.
#
# There are special cases for some 'catch-all' services::
#   **nova** returns true if any service enabled start with **n-**
#   **cinder** returns true if any service enabled start with **c-**
#   **ceilometer** returns true if any service enabled start with **ceilometer**
#   **glance** returns true if any service enabled start with **g-**
#   **neutron** returns true if any service enabled start with **q-**
#   **swift** returns true if any service enabled start with **s-**
#   **trove** returns true if any service enabled start with **tr-**
#   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
#   **s-** services will be enabled. This will be deprecated in the future.
#
# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
# We also need to make sure to treat **n-cell-region** and **n-cell-child**
# as enabled in this case.
#
# Uses global ``ENABLED_SERVICES``
# is_service_enabled service [service ...]
function is_service_enabled {
    local xtrace=$(set +o | grep xtrace)
    set +o xtrace
    local enabled=1
    # Declared local (previously leaked into the caller's scope), per the
    # HACKING convention followed by the other service helpers
    local services=$@
    local service
    for service in ${services}; do
        [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && enabled=0

        # Look for top-level 'enabled' function for this service
        if type is_${service}_enabled >/dev/null 2>&1; then
            # A function exists for this service, use it
            is_${service}_enabled
            enabled=$?
        fi

        # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
        #                are implemented

        [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0
        [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0
        [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0
        [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
        [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
        [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
        [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
        [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
        [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
    done
    $xtrace
    return $enabled
}
+
# Toggle enable/disable_service for services that must run exclusive of each other
#  $1 The name of a variable containing a space-separated list of services
#  $2 The name of a variable in which to store the enabled service's name
#  $3 The name of the service to enable
# Returns 1 when the selection is empty or not one of the listed options.
function use_exclusive_service {
    local options=${!1}
    local selection=$3
    # ``out`` and ``opt`` are now local so they no longer leak globals;
    # the result is still stored in the variable *named* by $2 via eval
    local out=$2
    local opt
    # Quoted [[ ]] test: the old unquoted ``[ -z $selection ]`` broke on
    # empty/whitespace values
    if [[ -z "$selection" ]] || [[ ! "$options" =~ "$selection" ]]; then
        return 1
    fi
    for opt in $options; do
        # Explicit if/else: the old ``a && b || c`` form would also run
        # disable_service if enable_service itself failed
        if [[ "$opt" = "$selection" ]]; then
            enable_service $opt
        else
            disable_service $opt
        fi
    done
    eval "$out=$selection"
    return 0
}
+
+
+# System Functions
+# ================
+
# Only run the command if the target file (the last arg) is not on an
# NFS filesystem.
# _safe_permission_operation command [args...] target
function _safe_permission_operation {
    local xtrace=$(set +o | grep xtrace)
    set +o xtrace
    local args=( $@ )
    local last
    local sudo_cmd
    local dir_to_check

    # Index of the last argument: the target path
    let last="${#args[*]} - 1"

    dir_to_check=${args[$last]}
    # If the target is not itself a directory, check its parent
    if [ ! -d "$dir_to_check" ]; then
        dir_to_check=`dirname "$dir_to_check"`
    fi

    # Skip the operation entirely on NFS mounts
    if is_nfs_directory "$dir_to_check" ; then
        $xtrace
        return 0
    fi

    # When tracking dependencies we run unprivileged (see pip_install)
    if [[ $TRACK_DEPENDS = True ]]; then
        sudo_cmd="env"
    else
        sudo_cmd="sudo"
    fi

    $xtrace
    $sudo_cmd $@
}
+
# Exit 0 if address is in network or 1 if address is not in network
# ip-range is in CIDR notation: 1.2.3.4/20
# address_in_net ip-address ip-range
function address_in_net {
    local ip=$1
    local range=$2
    local masklen=${range#*/}
    local netmask=$(cidr2netmask $masklen)
    # The address is in the network iff both are identical after masking
    [[ $(maskip ${range%/*} $netmask) == $(maskip $ip $netmask) ]]
}
+
# Add a user to a group.
# add_user_to_group user group
function add_user_to_group {
    local user=$1
    local group=$2

    if [[ -z "$os_VENDOR" ]]; then
        GetOSVersion
    fi

    # SLE11 and openSUSE 12.2 don't have the usual usermod
    # (their usermod appends groups with ``-A`` instead of ``-a -G``)
    if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then
        sudo usermod -a -G "$group" "$user"
    else
        sudo usermod -A "$group" "$user"
    fi
}
+
# Convert CIDR notation to a IPv4 netmask
# cidr2netmask cidr-bits
function cidr2netmask {
    local full="255 255 255 255"
    local partial="254 252 248 240 224 192 128"
    # Take one "255 " (4 chars) per whole byte of prefix, then the 3-char
    # partial-octet value for the remaining bits; word-split into $1..$4
    set -- ${full:0:$(( ($1 / 8) * 4 ))}${partial:$(( (7 - ($1 % 8)) * 4 )):3}
    # Missing positions default to 0
    echo ${1-0}.${2-0}.${3-0}.${4-0}
}
+
# Gracefully cp only if source file/dir exists
# cp_it source destination
function cp_it {
    # Quote the arguments so paths containing whitespace survive intact
    if [ -e "$1" ] || [ -d "$1" ]; then
        cp -pRL "$1" "$2"
    fi
}
+
# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
# ``localrc`` or on the command line if necessary::
#
# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
#
#     http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh

function export_proxy_variables {
    local proxy_var
    # Export each proxy variable that is set to a non-empty value
    for proxy_var in http_proxy https_proxy no_proxy; do
        if [[ -n "${!proxy_var}" ]]; then
            export $proxy_var
        fi
    done
}
+
# Returns true if the directory is on a filesystem mounted via NFS.
# is_nfs_directory directory
function is_nfs_directory {
    local fs_type
    # -f queries the filesystem, -L follows symlinks, %T prints the fs type
    fs_type=$(stat -f -L -c %T "$1")
    [[ "$fs_type" == "nfs" ]]
}
+
# Return the network portion of the given IP address using netmask
# netmask is in the traditional dotted-quad format
# maskip ip-address netmask
function maskip {
    local ip=$1
    local mask=$2
    # Views of the quads: strip first octet / strip last octet, for both
    # the address and the mask, so each octet can be isolated below
    local ip_rest="${ip#*.}"
    local ip_head="${ip%.*}"
    local mask_rest="${mask#*.}"
    local mask_head="${mask%.*}"
    local subnet=$((${ip%%.*}&${mask%%.*}))
    subnet+=.$((${ip_rest%%.*}&${mask_rest%%.*}))
    subnet+=.$((${ip_head##*.}&${mask_head##*.}))
    subnet+=.$((${ip##*.}&${mask##*.}))
    echo $subnet
}
+
# Service wrapper to restart services
# restart_service service-name
function restart_service {
    local service_bin=/sbin/service
    if is_ubuntu; then
        service_bin=/usr/sbin/service
    fi
    sudo $service_bin $1 restart
}
+
# Only change permissions of a file or directory if it is not on an
# NFS filesystem.
# safe_chmod mode target
function safe_chmod {
    # Quote ``$@`` so mode strings and paths with spaces pass through intact
    _safe_permission_operation chmod "$@"
}
+
# Only change ownership of a file or directory if it is not on an NFS
# filesystem.
# safe_chown [-R] owner target
function safe_chown {
    # Quote ``$@`` so owner specs and paths with spaces pass through intact
    _safe_permission_operation chown "$@"
}
+
# Service wrapper to start services
# start_service service-name
function start_service {
    local service_bin=/sbin/service
    if is_ubuntu; then
        service_bin=/usr/sbin/service
    fi
    sudo $service_bin $1 start
}
+
# Service wrapper to stop services
# stop_service service-name
function stop_service {
    local service_bin=/sbin/service
    if is_ubuntu; then
        service_bin=/usr/sbin/service
    fi
    sudo $service_bin $1 stop
}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/apache b/lib/apache
index 8ae78b2..2d5e39a 100644
--- a/lib/apache
+++ b/lib/apache
@@ -4,8 +4,8 @@
 # Dependencies:
 #
 # - ``functions`` file
-# -``STACK_USER`` must be defined
-
+# - ``STACK_USER`` must be defined
+#
 # lib/apache exports the following functions:
 #
 # - is_apache_enabled_service
@@ -50,7 +50,7 @@
 #
 # Uses global ``APACHE_ENABLED_SERVICES``
 # APACHE_ENABLED_SERVICES service [service ...]
-function is_apache_enabled_service() {
+function is_apache_enabled_service {
     services=$@
     for service in ${services}; do
         [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
@@ -59,7 +59,7 @@
 }
 
 # install_apache_wsgi() - Install Apache server and wsgi module
-function install_apache_wsgi() {
+function install_apache_wsgi {
     # Apache installation, because we mark it NOPRIME
     if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
@@ -79,7 +79,7 @@
 }
 
 # enable_apache_site() - Enable a particular apache site
-function enable_apache_site() {
+function enable_apache_site {
     local site=$@
     if is_ubuntu; then
         sudo a2ensite ${site}
@@ -90,7 +90,7 @@
 }
 
 # disable_apache_site() - Disable a particular apache site
-function disable_apache_site() {
+function disable_apache_site {
     local site=$@
     if is_ubuntu; then
         sudo a2dissite ${site}
@@ -100,12 +100,12 @@
 }
 
 # start_apache_server() - Start running apache server
-function start_apache_server() {
+function start_apache_server {
     start_service $APACHE_NAME
 }
 
 # stop_apache_server() - Stop running apache server
-function stop_apache_server() {
+function stop_apache_server {
     if [ -n "$APACHE_NAME" ]; then
         stop_service $APACHE_NAME
     else
@@ -114,7 +114,7 @@
 }
 
 # restart_apache_server
-function restart_apache_server() {
+function restart_apache_server {
     restart_service $APACHE_NAME
 }
 
diff --git a/lib/baremetal b/lib/baremetal
index a0df85e..473de0d 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -166,7 +166,7 @@
 # Check if baremetal is properly enabled
 # Returns false if VIRT_DRIVER is not baremetal, or if ENABLED_SERVICES
 # does not contain "baremetal"
-function is_baremetal() {
+function is_baremetal {
     if [[ "$ENABLED_SERVICES" =~ 'baremetal' && "$VIRT_DRIVER" = 'baremetal' ]]; then
         return 0
     fi
@@ -175,7 +175,7 @@
 
 # Install diskimage-builder and shell-in-a-box
 # so that we can build the deployment kernel & ramdisk
-function prepare_baremetal_toolchain() {
+function prepare_baremetal_toolchain {
     git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
     git_clone $BM_POSEUR_REPO $BM_POSEUR_DIR $BM_POSEUR_BRANCH
 
@@ -197,7 +197,7 @@
 }
 
 # set up virtualized environment for devstack-gate testing
-function create_fake_baremetal_env() {
+function create_fake_baremetal_env {
     local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
     # TODO(deva): add support for >1 VM
     sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS create-bridge
@@ -211,14 +211,14 @@
     BM_SECOND_MAC='12:34:56:78:90:12'
 }
 
-function cleanup_fake_baremetal_env() {
+function cleanup_fake_baremetal_env {
     local bm_poseur="$BM_POSEUR_DIR/bm_poseur"
     sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-vm
     sudo $bm_poseur $BM_POSEUR_EXTRA_OPTS destroy-bridge
 }
 
 # prepare various directories needed by baremetal hypervisor
-function configure_baremetal_nova_dirs() {
+function configure_baremetal_nova_dirs {
     # ensure /tftpboot is prepared
     sudo mkdir -p /tftpboot
     sudo mkdir -p /tftpboot/pxelinux.cfg
@@ -249,7 +249,7 @@
 
 # build deploy kernel+ramdisk, then upload them to glance
 # this function sets BM_DEPLOY_KERNEL_ID and BM_DEPLOY_RAMDISK_ID
-function upload_baremetal_deploy() {
+function upload_baremetal_deploy {
     token=$1
 
     if [ "$BM_BUILD_DEPLOY_RAMDISK" = "True" ]; then
@@ -281,7 +281,7 @@
 # create a basic baremetal flavor, associated with deploy kernel & ramdisk
 #
 # Usage: create_baremetal_flavor <aki_uuid> <ari_uuid>
-function create_baremetal_flavor() {
+function create_baremetal_flavor {
     aki=$1
     ari=$2
     nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
@@ -298,7 +298,7 @@
 # Sets KERNEL_ID and RAMDISK_ID
 #
 # Usage: extract_and_upload_k_and_r_from_image $token $file
-function extract_and_upload_k_and_r_from_image() {
+function extract_and_upload_k_and_r_from_image {
     token=$1
     file=$2
     image_name=$(basename "$file" ".qcow2")
@@ -339,7 +339,7 @@
 # Takes the same parameters, but has some peculiarities which made it
 # easier to create a separate method, rather than complicate the logic
 # of the existing function.
-function upload_baremetal_image() {
+function upload_baremetal_image {
     local image_url=$1
     local token=$2
 
@@ -429,10 +429,9 @@
     DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
 }
 
-function clear_baremetal_of_all_nodes() {
+function clear_baremetal_of_all_nodes {
     list=$(nova baremetal-node-list | awk -F '| ' 'NR>3 {print $2}' )
-    for node in $list
-    do
+    for node in $list; do
         nova baremetal-node-delete $node
     done
 }
@@ -441,7 +440,7 @@
 # Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified
 #
 # Usage: add_baremetal_node <first_mac> <second_mac>
-function add_baremetal_node() {
+function add_baremetal_node {
     mac_1=${1:-$BM_FIRST_MAC}
     mac_2=${2:-$BM_SECOND_MAC}
 
diff --git a/lib/ceilometer b/lib/ceilometer
index f9c7691..0be4184 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -59,38 +59,45 @@
 
 # Functions
 # ---------
-#
+
# Test if any Ceilometer services are enabled
# Matches any entry starting with "ceilometer-" in ``ENABLED_SERVICES``
# is_ceilometer_enabled
function is_ceilometer_enabled {
    [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]]
}
+
 # create_ceilometer_accounts() - Set up common required ceilometer accounts
 
 create_ceilometer_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ceilometer
     if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
-        CEILOMETER_USER=$(keystone user-create \
-            --name=ceilometer \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant_id $SERVICE_TENANT \
-            --email=ceilometer@example.com \
+        CEILOMETER_USER=$(openstack user create \
+            ceilometer \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email ceilometer@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $CEILOMETER_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $CEILOMETER_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            CEILOMETER_SERVICE=$(keystone service-create \
-                --name=ceilometer \
+            CEILOMETER_SERVICE=$(openstack service create \
+                ceilometer \
                 --type=metering \
                 --description="OpenStack Telemetry Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $CEILOMETER_SERVICE \
                 --region RegionOne \
-                --service_id $CEILOMETER_SERVICE \
-                --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
-                --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
-                --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT"
+                --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
+                --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
+                --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/"
         fi
     fi
 }
@@ -98,18 +105,18 @@
 
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_ceilometer() {
+function cleanup_ceilometer {
     mongo ceilometer --eval "db.dropDatabase();"
 }
 
 # configure_ceilometerclient() - Set config files, create data dirs, etc
-function configure_ceilometerclient() {
+function configure_ceilometerclient {
     setup_develop $CEILOMETERCLIENT_DIR
     sudo install -D -m 0644 -o $STACK_USER {$CEILOMETERCLIENT_DIR/tools/,/etc/bash_completion.d/}ceilometer.bash_completion
 }
 
 # configure_ceilometer() - Set config files, create data dirs, etc
-function configure_ceilometer() {
+function configure_ceilometer {
     setup_develop $CEILOMETER_DIR
 
     [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR
@@ -155,7 +162,7 @@
     fi
 }
 
-function configure_mongodb() {
+function configure_mongodb {
     if is_fedora; then
         # install mongodb client
         install_package mongodb
@@ -167,7 +174,7 @@
 }
 
 # init_ceilometer() - Initialize etc.
-function init_ceilometer() {
+function init_ceilometer {
     # Create cache dir
     sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR
     sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR
@@ -180,17 +187,17 @@
 }
 
 # install_ceilometer() - Collect source and prepare
-function install_ceilometer() {
+function install_ceilometer {
     git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH
 }
 
 # install_ceilometerclient() - Collect source and prepare
-function install_ceilometerclient() {
+function install_ceilometerclient {
     git_clone $CEILOMETERCLIENT_REPO $CEILOMETERCLIENT_DIR $CEILOMETERCLIENT_BRANCH
 }
 
 # start_ceilometer() - Start running processes, including screen
-function start_ceilometer() {
+function start_ceilometer {
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
     fi
@@ -199,9 +206,12 @@
     screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
     screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
 
-    echo "Waiting for ceilometer-api to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
-        die $LINENO "ceilometer-api did not start"
+    # only die on API if it was actually intended to be turned on
+    if service_enabled ceilometer-api; then
+        echo "Waiting for ceilometer-api to start..."
+        if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
+            die $LINENO "ceilometer-api did not start"
+        fi
     fi
 
     screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
@@ -209,7 +219,7 @@
 }
 
 # stop_ceilometer() - Stop running processes
-function stop_ceilometer() {
+function stop_ceilometer {
     # Kill the ceilometer screen windows
     for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do
         screen_stop $serv
diff --git a/lib/cinder b/lib/cinder
index 9f70b2a..d003f5d 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -27,6 +27,12 @@
 
 # set up default driver
 CINDER_DRIVER=${CINDER_DRIVER:-default}
+CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins
+
+# grab plugin config if specified via cinder_driver
+if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
+    source $CINDER_PLUGINS/$CINDER_DRIVER
+fi
 
 # set up default directories
 CINDER_DIR=$DEST/cinder
@@ -85,10 +91,18 @@
 
 # Functions
 # ---------
+
+# Test if any Cinder services are enabled
+# is_cinder_enabled
+function is_cinder_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"c-" ]] && return 0
+    return 1
+}
+
 # _clean_lvm_lv removes all cinder LVM volumes
 #
 # Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX
-function _clean_lvm_lv() {
+function _clean_lvm_lv {
     local vg=$1
     local lv_prefix=$2
 
@@ -105,7 +119,7 @@
 # volume group used by cinder
 #
 # Usage: _clean_lvm_backing_file() $VOLUME_GROUP
-function _clean_lvm_backing_file() {
+function _clean_lvm_backing_file {
     local vg=$1
 
     # if there is no logical volume left, it's safe to attempt a cleanup
@@ -122,7 +136,7 @@
 
 # cleanup_cinder() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_cinder() {
+function cleanup_cinder {
     # ensure the volume group is cleared up because fails might
     # leave dead volumes in the group
     TARGETS=$(sudo tgtadm --op show --mode target)
@@ -167,53 +181,38 @@
 }
 
 # configure_cinder_rootwrap() - configure Cinder's rootwrap
-function configure_cinder_rootwrap() {
+function configure_cinder_rootwrap {
     # Set the paths of certain binaries
     CINDER_ROOTWRAP=$(get_rootwrap_location cinder)
-    if [[ ! -x $CINDER_ROOTWRAP ]]; then
-        CINDER_ROOTWRAP=$(get_rootwrap_location oslo)
-        if [[ ! -x $CINDER_ROOTWRAP ]]; then
-            die $LINENO "No suitable rootwrap found."
-        fi
-    fi
 
-    # If Cinder ships the new rootwrap filters files, deploy them
-    # (owned by root) and add a parameter to $CINDER_ROOTWRAP
-    ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP"
-    if [[ -d $CINDER_DIR/etc/cinder/rootwrap.d ]]; then
-        # Wipe any existing rootwrap.d files first
-        if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then
-            sudo rm -rf $CINDER_CONF_DIR/rootwrap.d
-        fi
-        # Deploy filters to /etc/cinder/rootwrap.d
-        sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d
-        sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d
-        sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d
-        sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/*
-        # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d
-        if [[ -f $CINDER_DIR/etc/cinder/rootwrap.conf ]]; then
-            sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/
-        else
-            # rootwrap.conf is no longer shipped in Cinder itself
-            echo "filters_path=" | sudo tee $CINDER_CONF_DIR/rootwrap.conf > /dev/null
-        fi
-        sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf
-        sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
-        sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
-        # Specify rootwrap.conf as first parameter to rootwrap
-        CINDER_ROOTWRAP="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf"
-        ROOTWRAP_CINDER_SUDOER_CMD="$CINDER_ROOTWRAP *"
+    # Deploy new rootwrap filters files (owned by root).
+    # Wipe any existing rootwrap.d files first
+    if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then
+        sudo rm -rf $CINDER_CONF_DIR/rootwrap.d
     fi
+    # Deploy filters to /etc/cinder/rootwrap.d
+    sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d
+    sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d
+    sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d
+    sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/*
+    # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d
+    sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/
+    sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf
+    sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
+    sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
+    # Specify rootwrap.conf as first parameter to rootwrap
+    ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *"
 
+    # Set up the rootwrap sudoers for cinder
     TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE
     chmod 0440 $TEMPFILE
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
 }
 
 # configure_cinder() - Set config files, create data dirs, etc
-function configure_cinder() {
+function configure_cinder {
     if [[ ! -d $CINDER_CONF_DIR ]]; then
         sudo mkdir -p $CINDER_CONF_DIR
     fi
@@ -300,42 +299,8 @@
         setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id"
     fi
 
-    if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then
-        (
-            set -u
-            iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver"
-            iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL"
-            iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME"
-            iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD"
-            iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
-            iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
-        )
-    elif [ "$CINDER_DRIVER" == "nfs" ]; then
-        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver"
-        iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf"
-        echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf"
-        sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf
-    elif [ "$CINDER_DRIVER" == "sheepdog" ]; then
-        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
-    elif [ "$CINDER_DRIVER" == "glusterfs" ]; then
-        # To use glusterfs, set the following in localrc:
-        # CINDER_DRIVER=glusterfs
-        # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2"
-        # Shares are <host>:<volume> and separated by semicolons.
-
-        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver"
-        iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares"
-        touch $CINDER_CONF_DIR/glusterfs_shares
-        if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then
-            CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n")
-            echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares
-        fi
-    elif [ "$CINDER_DRIVER" == "vsphere" ]; then
-        echo_summary "Using VMware vCenter driver"
-        iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP"
-        iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER"
-        iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD"
-        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver"
+    if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then
+        configure_cinder_driver
     fi
 
     if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
@@ -363,60 +328,59 @@
 # service              cinder     admin        # if enabled
 
 # Migrated from keystone_data.sh
-create_cinder_accounts() {
+function create_cinder_accounts {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Cinder
     if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-        CINDER_USER=$(keystone user-create \
-            --name=cinder \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=cinder@example.com \
+        CINDER_USER=$(openstack user create \
+            cinder \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email cinder@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $CINDER_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $CINDER_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            CINDER_SERVICE=$(keystone service-create \
-                --name=cinder \
+            CINDER_SERVICE=$(openstack service create \
+                cinder \
                 --type=volume \
                 --description="Cinder Volume Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $CINDER_SERVICE \
                 --region RegionOne \
-                --service_id $CINDER_SERVICE \
                 --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
-            CINDER_V2_SERVICE=$(keystone service-create \
-                --name=cinderv2 \
+            CINDER_V2_SERVICE=$(openstack service create \
+                cinderv2 \
                 --type=volumev2 \
                 --description="Cinder Volume Service V2" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $CINDER_V2_SERVICE \
                 --region RegionOne \
-                --service_id $CINDER_V2_SERVICE \
                 --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
-
         fi
     fi
 }
 
 # create_cinder_cache_dir() - Part of the init_cinder() process
-function create_cinder_cache_dir() {
+function create_cinder_cache_dir {
     # Create cache dir
     sudo mkdir -p $CINDER_AUTH_CACHE_DIR
     sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR
     rm -f $CINDER_AUTH_CACHE_DIR/*
 }
 
-create_cinder_volume_group() {
+function create_cinder_volume_group {
     # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes
     # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume
     # service if it (they) does (do) not yet exist. If you don't wish to use a
@@ -464,7 +428,7 @@
 }
 
 # init_cinder() - Initialize database and volume group
-function init_cinder() {
+function init_cinder {
     # Force nova volumes off
     NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
 
@@ -500,20 +464,20 @@
 }
 
 # install_cinder() - Collect source and prepare
-function install_cinder() {
+function install_cinder {
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
     setup_develop $CINDER_DIR
 }
 
 # install_cinderclient() - Collect source and prepare
-function install_cinderclient() {
+function install_cinderclient {
     git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
     setup_develop $CINDERCLIENT_DIR
     sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion
 }
 
 # apply config.d approach for cinder volumes directory
-function _configure_tgt_for_config_d() {
+function _configure_tgt_for_config_d {
     if [[ ! -d /etc/tgt/stack.d/ ]]; then
         sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d
         echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf
@@ -521,7 +485,7 @@
 }
 
 # start_cinder() - Start running processes, including screen
-function start_cinder() {
+function start_cinder {
     if is_service_enabled c-vol; then
         # Delete any old stack.conf
         sudo rm -f /etc/tgt/conf.d/stack.conf
@@ -532,8 +496,12 @@
             sudo stop tgt || true
             sudo start tgt
         elif is_fedora; then
-            # bypass redirection to systemctl during restart
-            sudo /sbin/service --skip-redirect tgtd restart
+            if [[ $DISTRO =~ (rhel6) ]]; then
+                sudo /sbin/service tgtd restart
+            else
+                # bypass redirection to systemctl during restart
+                sudo /sbin/service --skip-redirect tgtd restart
+            fi
         elif is_suse; then
             restart_service tgtd
         else
@@ -561,7 +529,7 @@
 }
 
 # stop_cinder() - Stop running processes
-function stop_cinder() {
+function stop_cinder {
     # Kill the cinder screen windows
     for serv in c-api c-bak c-sch c-vol; do
         screen_stop $serv
diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS
new file mode 100644
index 0000000..fa10715
--- /dev/null
+++ b/lib/cinder_plugins/XenAPINFS
@@ -0,0 +1,44 @@
+# lib/cinder_plugins/XenAPINFS
+# Configure the XenAPINFS driver
+
+# Enable with:
+#
+#   CINDER_DRIVER=XenAPINFS
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_driver - Set config files, create data dirs, etc
+function configure_cinder_driver {
+    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.xenapi.sm.XenAPINFSDriver"
+    iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL"
+    iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME"
+    iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD"
+    iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
+    iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs
new file mode 100644
index 0000000..b4196e4
--- /dev/null
+++ b/lib/cinder_plugins/glusterfs
@@ -0,0 +1,50 @@
+# lib/cinder_plugins/glusterfs
+# Configure the glusterfs driver
+
+# Enable with:
+#
+#   CINDER_DRIVER=glusterfs
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_driver - Set config files, create data dirs, etc
+function configure_cinder_driver {
+    # To use glusterfs, set the following in localrc:
+    # CINDER_DRIVER=glusterfs
+    # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2"
+    # Shares are <host>:<volume> and separated by semicolons.
+
+    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver"
+    iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares"
+    touch $CINDER_CONF_DIR/glusterfs_shares
+    if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then
+        CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n")
+        echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs
new file mode 100644
index 0000000..2d9d875
--- /dev/null
+++ b/lib/cinder_plugins/nfs
@@ -0,0 +1,42 @@
+# lib/cinder_plugins/nfs
+# Configure the nfs driver
+
+# Enable with:
+#
+#   CINDER_DRIVER=nfs
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_driver - Set config files, create data dirs, etc
+function configure_cinder_driver {
+    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver"
+    iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf"
+    echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf"
+    sudo chmod 660 $CINDER_CONF_DIR/nfs_shares.conf
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog
new file mode 100644
index 0000000..30c60c6
--- /dev/null
+++ b/lib/cinder_plugins/sheepdog
@@ -0,0 +1,39 @@
+# lib/cinder_plugins/sheepdog
+# Configure the sheepdog driver
+
+# Enable with:
+#
+#   CINDER_DRIVER=sheepdog
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_driver - Set config files, create data dirs, etc
+function configure_cinder_driver {
+    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/solidfire b/lib/cinder_plugins/solidfire
new file mode 100644
index 0000000..2c970b5
--- /dev/null
+++ b/lib/cinder_plugins/solidfire
@@ -0,0 +1,48 @@
+# lib/cinder_plugins/solidfire
+# Configure the solidfire driver
+
+# Enable with:
+#
+#   CINDER_DRIVER=solidfire
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_driver - Set config files, create data dirs, etc
+function configure_cinder_driver {
+    # To use solidfire, set the following in localrc:
+    # CINDER_DRIVER=solidfire
+    # SAN_IP=<mvip>
+    # SAN_LOGIN=<cluster-admin-account>
+    # SAN_PASSWORD=<cluster-admin-password>
+
+    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.solidfire.SolidFireDriver"
+    iniset $CINDER_CONF DEFAULT san_ip $SAN_IP
+    iniset $CINDER_CONF DEFAULT san_login $SAN_LOGIN
+    iniset $CINDER_CONF DEFAULT san_password $SAN_PASSWORD
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere
new file mode 100644
index 0000000..436b060
--- /dev/null
+++ b/lib/cinder_plugins/vsphere
@@ -0,0 +1,42 @@
+# lib/cinder_plugins/vsphere
+# Configure the vsphere driver
+
+# Enable with:
+#
+#   CINDER_DRIVER=vsphere
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_driver - make configuration changes, including those to other services
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_driver - Set config files, create data dirs, etc
+function configure_cinder_driver {
+    iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP"
+    iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER"
+    iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD"
+    iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver"
+}
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/config b/lib/config
index 1678aec..552aeb0 100644
--- a/lib/config
+++ b/lib/config
@@ -25,7 +25,7 @@
 
 # Get the section for the specific group and config file
 # get_meta_section infile group configfile
-function get_meta_section() {
+function get_meta_section {
     local file=$1
     local matchgroup=$2
     local configfile=$3
@@ -57,7 +57,7 @@
 
 # Get a list of config files for a specific group
 # get_meta_section_files infile group
-function get_meta_section_files() {
+function get_meta_section_files {
     local file=$1
     local matchgroup=$2
 
@@ -77,7 +77,7 @@
 # Merge the contents of a meta-config file into its destination config file
 # If configfile does not exist it will be created.
 # merge_config_file infile group configfile
-function merge_config_file() {
+function merge_config_file {
     local file=$1
     local matchgroup=$2
     local configfile=$3
@@ -106,7 +106,7 @@
 
 # Merge all of the files specified by group
 # merge_config_group infile group [group ...]
-function merge_config_group() {
+function merge_config_group {
     local localfile=$1; shift
     local matchgroups=$@
 
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 476b4b9..f5ee3c0 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -21,11 +21,15 @@
     if is_ubuntu; then
         # Get ruthless with mysql
         stop_service $MYSQL
-        sudo aptitude purge -y ~nmysql-server
+        apt_get purge -y mysql*
         sudo rm -rf /var/lib/mysql
         return
     elif is_fedora; then
-        MYSQL=mysqld
+        if [[ $DISTRO =~ (rhel7) ]]; then
+            MYSQL=mariadb
+        else
+            MYSQL=mysqld
+        fi
     elif is_suse; then
         MYSQL=mysql
     else
@@ -48,8 +52,12 @@
         MY_CONF=/etc/mysql/my.cnf
         MYSQL=mysql
     elif is_fedora; then
+        if [[ $DISTRO =~ (rhel7) ]]; then
+            MYSQL=mariadb
+        else
+            MYSQL=mysqld
+        fi
         MY_CONF=/etc/my.cnf
-        MYSQL=mysqld
     elif is_suse; then
         MY_CONF=/etc/my.cnf
         MYSQL=mysql
@@ -135,7 +143,11 @@
     fi
     # Install mysql-server
     if is_ubuntu || is_fedora; then
-        install_package mysql-server
+        if [[ $DISTRO =~ (rhel7) ]]; then
+            install_package mariadb-server
+        else
+            install_package mysql-server
+        fi
     elif is_suse; then
         if ! is_package_installed mariadb; then
             install_package mysql-community-server
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index c459feb..96a5947 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -21,7 +21,7 @@
     stop_service postgresql
     if is_ubuntu; then
         # Get ruthless with mysql
-        sudo aptitude purge -y  ~npostgresql
+        apt_get purge -y postgresql*
         return
     elif is_fedora; then
         uninstall_package postgresql-server
diff --git a/lib/gantt b/lib/gantt
index 832d759..8db2ca1 100644
--- a/lib/gantt
+++ b/lib/gantt
@@ -47,42 +47,42 @@
 
 # cleanup_gantt() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_gantt() {
+function cleanup_gantt {
     echo "Cleanup Gantt"
 }
 
 # configure_gantt() - Set config files, create data dirs, etc
-function configure_gantt() {
+function configure_gantt {
     echo "Configure Gantt"
 }
 
 # init_gantt() - Initialize database and volume group
-function init_gantt() {
+function init_gantt {
     echo "Initialize Gantt"
 }
 
 # install_gantt() - Collect source and prepare
-function install_gantt() {
+function install_gantt {
     git_clone $GANTT_REPO $GANTT_DIR $GANTT_BRANCH
     setup_develop $GANTT_DIR
 }
 
 # install_ganttclient() - Collect source and prepare
-function install_ganttclient() {
+function install_ganttclient {
     echo "Install Gantt Client"
 #    git_clone $GANTTCLIENT_REPO $GANTTCLIENT_DIR $GANTTCLIENT_BRANCH
 #    setup_develop $GANTTCLIENT_DIR
 }
 
 # start_gantt() - Start running processes, including screen
-function start_gantt() {
+function start_gantt {
     if is_service_enabled gantt; then
         screen_it gantt "cd $GANTT_DIR && $GANTT_BIN_DIR/gantt-scheduler --config-file $GANTT_CONF"
     fi
 }
 
 # stop_gantt() - Stop running processes
-function stop_gantt() {
+function stop_gantt {
     echo "Stop Gantt"
     screen_stop gantt
 }
diff --git a/lib/glance b/lib/glance
index 00f499a..8a4c21b 100644
--- a/lib/glance
+++ b/lib/glance
@@ -59,16 +59,23 @@
 # Functions
 # ---------
 
+# Test if any Glance services are enabled
+# is_glance_enabled
+function is_glance_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"g-" ]] && return 0
+    return 1
+}
+
 # cleanup_glance() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_glance() {
+function cleanup_glance {
     # kill instances (nova)
     # delete image files (glance)
     sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
 }
 
 # configure_glance() - Set config files, create data dirs, etc
-function configure_glance() {
+function configure_glance {
     if [[ ! -d $GLANCE_CONF_DIR ]]; then
         sudo mkdir -p $GLANCE_CONF_DIR
     fi
@@ -108,10 +115,8 @@
     iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
     iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    if is_service_enabled qpid; then
-        iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid
-    elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit
+    if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then
+        iniset $GLANCE_API_CONF DEFAULT notification_driver messaging
     fi
     iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
     iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
@@ -155,7 +160,7 @@
 }
 
 # create_glance_cache_dir() - Part of the init_glance() process
-function create_glance_cache_dir() {
+function create_glance_cache_dir {
     # Create cache dir
     sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api
     sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api
@@ -166,7 +171,7 @@
 }
 
 # init_glance() - Initialize databases, etc.
-function init_glance() {
+function init_glance {
     # Delete existing images
     rm -rf $GLANCE_IMAGE_DIR
     mkdir -p $GLANCE_IMAGE_DIR
@@ -185,19 +190,19 @@
 }
 
 # install_glanceclient() - Collect source and prepare
-function install_glanceclient() {
+function install_glanceclient {
     git_clone $GLANCECLIENT_REPO $GLANCECLIENT_DIR $GLANCECLIENT_BRANCH
     setup_develop $GLANCECLIENT_DIR
 }
 
 # install_glance() - Collect source and prepare
-function install_glance() {
+function install_glance {
     git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH
     setup_develop $GLANCE_DIR
 }
 
 # start_glance() - Start running processes, including screen
-function start_glance() {
+function start_glance {
     screen_it g-reg "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
     screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
@@ -207,7 +212,7 @@
 }
 
 # stop_glance() - Stop running processes
-function stop_glance() {
+function stop_glance {
     # Kill the Glance screen windows
     screen_stop g-api
     screen_stop g-reg
diff --git a/lib/heat b/lib/heat
index f171cb4..d0c0302 100644
--- a/lib/heat
+++ b/lib/heat
@@ -47,14 +47,14 @@
 
 # cleanup_heat() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_heat() {
+function cleanup_heat {
     sudo rm -rf $HEAT_AUTH_CACHE_DIR
     sudo rm -rf $HEAT_ENV_DIR
     sudo rm -rf $HEAT_TEMPLATES_DIR
 }
 
 # configure_heat() - Set config files, create data dirs, etc
-function configure_heat() {
+function configure_heat {
     setup_develop $HEAT_DIR
 
     if [[ ! -d $HEAT_CONF_DIR ]]; then
@@ -137,7 +137,7 @@
 }
 
 # init_heat() - Initialize database
-function init_heat() {
+function init_heat {
 
     # (re)create heat database
     recreate_database heat utf8
@@ -147,26 +147,26 @@
 }
 
 # create_heat_cache_dir() - Part of the init_heat() process
-function create_heat_cache_dir() {
+function create_heat_cache_dir {
     # Create cache dirs
     sudo mkdir -p $HEAT_AUTH_CACHE_DIR
     sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR
 }
 
 # install_heatclient() - Collect source and prepare
-function install_heatclient() {
+function install_heatclient {
     git_clone $HEATCLIENT_REPO $HEATCLIENT_DIR $HEATCLIENT_BRANCH
     setup_develop $HEATCLIENT_DIR
     sudo install -D -m 0644 -o $STACK_USER {$HEATCLIENT_DIR/tools/,/etc/bash_completion.d/}heat.bash_completion
 }
 
 # install_heat() - Collect source and prepare
-function install_heat() {
+function install_heat {
     git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
 }
 
 # start_heat() - Start running processes, including screen
-function start_heat() {
+function start_heat {
     screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF"
     screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF"
     screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF"
@@ -174,7 +174,7 @@
 }
 
 # stop_heat() - Stop running processes
-function stop_heat() {
+function stop_heat {
     # Kill the screen windows
     for serv in h-eng h-api h-api-cfn h-api-cw; do
         screen_stop $serv
@@ -186,8 +186,7 @@
     local elements=$2
     local arch=$3
     local output=$TOP_DIR/files/$4
-    if [[ -f "$output.qcow2" ]];
-    then
+    if [[ -f "$output.qcow2" ]]; then
         echo "Image file already exists: $output_file"
     else
         ELEMENTS_PATH=$elements_path disk-image-create \
@@ -197,6 +196,29 @@
     upload_image "http://localhost/$output.qcow2" $TOKEN
 }
 
+# create_heat_accounts() - Set up common required heat accounts
+# Note this is in addition to what is in files/keystone_data.sh
+function create_heat_accounts {
+    # Note we have to pass token/endpoint here because the current endpoint and
+    # version negotiation in OSC means just --os-identity-api-version=3 won't work
+    KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
+    D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+        --os-identity-api-version=3 domain create heat \
+        --description "Owns users and projects created by heat" \
+        | grep ' id ' | get_field 2)
+    iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
+
+    openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+        --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
+        --domain $D_ID heat_domain_admin \
+        --description "Manages users and projects created by heat"
+    openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+        --os-identity-api-version=3 role add \
+        --user heat_domain_admin --domain ${D_ID} admin
+    iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
+    iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
+}
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/horizon b/lib/horizon
index c64d850..27c2d26 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -39,7 +39,7 @@
 # ---------
 
 # utility method of setting python option
-function _horizon_config_set() {
+function _horizon_config_set {
     local file=$1
     local section=$2
     local option=$3
@@ -64,7 +64,7 @@
 
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_horizon() {
+function cleanup_horizon {
     if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
         # If ``/usr/bin/node`` points into ``$DEST``
         # we installed it via ``install_nodejs``
@@ -75,15 +75,12 @@
 }
 
 # configure_horizon() - Set config files, create data dirs, etc
-function configure_horizon() {
+function configure_horizon {
     setup_develop $HORIZON_DIR
 }
 
 # init_horizon() - Initialize databases, etc.
-function init_horizon() {
-    # Remove stale session database.
-    rm -f $HORIZON_DIR/openstack_dashboard/local/dashboard_openstack.sqlite3
-
+function init_horizon {
     # ``local_settings.py`` is used to override horizon default settings.
     local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
@@ -106,12 +103,6 @@
         _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True
     fi
 
-    # Initialize the horizon database (it stores sessions and notices shown to
-    # users).  The user system is external (keystone).
-    cd $HORIZON_DIR
-    python manage.py syncdb --noinput
-    cd $TOP_DIR
-
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
@@ -152,7 +143,7 @@
 }
 
 # install_horizon() - Collect source and prepare
-function install_horizon() {
+function install_horizon {
     # Apache installation, because we mark it NOPRIME
     install_apache_wsgi
 
@@ -160,13 +151,13 @@
 }
 
 # start_horizon() - Start running processes, including screen
-function start_horizon() {
+function start_horizon {
     restart_apache_server
     screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
 }
 
 # stop_horizon() - Stop running processes (non-screen)
-function stop_horizon() {
+function stop_horizon {
     stop_apache_server
 }
 
diff --git a/lib/infra b/lib/infra
index 0dcf0ad..7f70ff2 100644
--- a/lib/infra
+++ b/lib/infra
@@ -27,7 +27,7 @@
 # ------------
 
 # unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools
-function unfubar_setuptools() {
+function unfubar_setuptools {
     # this is a giant game of who's on first, but it does consistently work
     # there is hope that upstream python packaging fixes this in the future
     echo_summary "Unbreaking setuptools"
@@ -40,7 +40,7 @@
 
 
 # install_infra() - Collect source and prepare
-function install_infra() {
+function install_infra {
     # bring down global requirements
     git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
 
diff --git a/lib/ironic b/lib/ironic
index b8838f5..4e5edc9 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -49,26 +49,33 @@
 # Functions
 # ---------
 
+# Test if any Ironic services are enabled
+# is_ironic_enabled
+function is_ironic_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0
+    return 1
+}
+
 # install_ironic() - Collect source and prepare
-function install_ironic() {
+function install_ironic {
     git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
     setup_develop $IRONIC_DIR
 }
 
 # install_ironicclient() - Collect sources and prepare
-function install_ironicclient() {
+function install_ironicclient {
     git_clone $IRONICCLIENT_REPO $IRONICCLIENT_DIR $IRONICCLIENT_BRANCH
     setup_develop $IRONICCLIENT_DIR
 }
 
 # cleanup_ironic() - Remove residual data files, anything left over from previous
 # runs that would need to clean up.
-function cleanup_ironic() {
+function cleanup_ironic {
     sudo rm -rf $IRONIC_AUTH_CACHE_DIR
 }
 
 # configure_ironic() - Set config files, create data dirs, etc
-function configure_ironic() {
+function configure_ironic {
     if [[ ! -d $IRONIC_CONF_DIR ]]; then
         sudo mkdir -p $IRONIC_CONF_DIR
     fi
@@ -94,7 +101,7 @@
 
 # configure_ironic_api() - Is used by configure_ironic(). Performs
 # API specific configuration.
-function configure_ironic_api() {
+function configure_ironic_api {
     iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
     iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON
     iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
@@ -105,11 +112,6 @@
     iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
     iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    if is_service_enabled qpid; then
-        iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid
-    elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
-        iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit
-    fi
     iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT
     iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api
 
@@ -118,7 +120,7 @@
 
 # configure_ironic_conductor() - Is used by configure_ironic().
 # Sets conductor specific settings.
-function configure_ironic_conductor() {
+function configure_ironic_conductor {
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
@@ -126,7 +128,7 @@
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
-function create_ironic_cache_dir() {
+function create_ironic_cache_dir {
     # Create cache dir
     sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api
     sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api
@@ -141,32 +143,32 @@
 # Tenant               User       Roles
 # ------------------------------------------------------------------
 # service              ironic     admin        # if enabled
-create_ironic_accounts() {
+function create_ironic_accounts {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ironic
     if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
-        IRONIC_USER=$(keystone user-create \
-            --name=ironic \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=ironic@example.com \
+        IRONIC_USER=$(openstack user create \
+            ironic \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email ironic@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user_id $IRONIC_USER \
-            --role_id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $IRONIC_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            IRONIC_SERVICE=$(keystone service-create \
-                --name=ironic \
+            IRONIC_SERVICE=$(openstack service create \
+                ironic \
                 --type=baremetal \
                 --description="Ironic baremetal provisioning service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $IRONIC_SERVICE \
                 --region RegionOne \
-                --service_id $IRONIC_SERVICE \
                 --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
                 --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
                 --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT"
@@ -176,7 +178,7 @@
 
 
 # init_ironic() - Initialize databases, etc.
-function init_ironic() {
+function init_ironic {
     # (Re)create  ironic database
     recreate_database ironic utf8
 
@@ -184,13 +186,10 @@
     $IRONIC_BIN_DIR/ironic-dbsync
 
     create_ironic_cache_dir
-
-    # Create keystone artifacts for Ironic.
-    create_ironic_accounts
 }
 
 # start_ironic() - Start running processes, including screen
-function start_ironic() {
+function start_ironic {
     # Start Ironic API server, if enabled.
     if is_service_enabled ir-api; then
         start_ironic_api
@@ -204,7 +203,7 @@
 
 # start_ironic_api() - Used by start_ironic().
 # Starts Ironic API server.
-function start_ironic_api() {
+function start_ironic_api {
     screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
     echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
@@ -214,13 +213,13 @@
 
 # start_ironic_conductor() - Used by start_ironic().
 # Starts Ironic conductor.
-function start_ironic_conductor() {
+function start_ironic_conductor {
     screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
     # TODO(romcheg): Find a way to check whether the conductor has started.
 }
 
 # stop_ironic() - Stop running processes
-function stop_ironic() {
+function stop_ironic {
     # Kill the Ironic screen windows
     screen -S $SCREEN_NAME -p ir-api -X kill
     screen -S $SCREEN_NAME -p ir-cond -X kill
diff --git a/lib/keystone b/lib/keystone
index 4f7f68b..c6856c9 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -70,6 +70,8 @@
 KEYSTONE_SERVICE_PORT_INT=${KEYSTONE_SERVICE_PORT_INT:-5001}
 KEYSTONE_SERVICE_PROTOCOL=${KEYSTONE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 
+# Bind hosts
+KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST}
 # Set the tenant for service accounts in Keystone
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
@@ -90,7 +92,7 @@
 # ---------
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_keystone() {
+function cleanup_keystone {
     # kill instances (nova)
     # delete image files (glance)
     # This function intentionally left blank
@@ -98,14 +100,14 @@
 }
 
 # _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_keystone_apache_wsgi() {
+function _cleanup_keystone_apache_wsgi {
     sudo rm -f $KEYSTONE_WSGI_DIR/*.wsgi
     disable_apache_site keystone
     sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/keystone
 }
 
 # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
-function _config_keystone_apache_wsgi() {
+function _config_keystone_apache_wsgi {
     sudo mkdir -p $KEYSTONE_WSGI_DIR
 
     # copy proxy vhost and wsgi file
@@ -125,7 +127,7 @@
 }
 
 # configure_keystone() - Set config files, create data dirs, etc
-function configure_keystone() {
+function configure_keystone {
     if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
         sudo mkdir -p $KEYSTONE_CONF_DIR
     fi
@@ -178,7 +180,7 @@
     # Set the URL advertised in the ``versions`` structure returned by the '/' route
     iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
     iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
-    iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_SERVICE_HOST"
+    iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
 
     # Register SSL certificates if provided
     if is_ssl_enabled_service key; then
@@ -201,7 +203,7 @@
         iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider
     fi
 
-    iniset $KEYSTONE_CONF sql connection `database_connection_url keystone`
+    iniset $KEYSTONE_CONF database connection `database_connection_url keystone`
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
 
     if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
@@ -272,63 +274,72 @@
 # invisible_to_admin   demo       Member
 
 # Migrated from keystone_data.sh
-create_keystone_accounts() {
+function create_keystone_accounts {
 
     # admin
-    ADMIN_TENANT=$(keystone tenant-create \
-        --name admin \
+    ADMIN_TENANT=$(openstack project create \
+        admin \
         | grep " id " | get_field 2)
-    ADMIN_USER=$(keystone user-create \
-        --name admin \
-        --pass "$ADMIN_PASSWORD" \
+    ADMIN_USER=$(openstack user create \
+        admin \
+        --project "$ADMIN_TENANT" \
         --email admin@example.com \
+        --password "$ADMIN_PASSWORD" \
         | grep " id " | get_field 2)
-    ADMIN_ROLE=$(keystone role-create \
-        --name admin \
+    ADMIN_ROLE=$(openstack role create \
+        admin \
         | grep " id " | get_field 2)
-    keystone user-role-add \
-        --user-id $ADMIN_USER \
-        --role-id $ADMIN_ROLE \
-        --tenant-id $ADMIN_TENANT
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $ADMIN_TENANT \
+        --user $ADMIN_USER
 
     # service
-    SERVICE_TENANT=$(keystone tenant-create \
-        --name $SERVICE_TENANT_NAME \
+    SERVICE_TENANT=$(openstack project create \
+        $SERVICE_TENANT_NAME \
         | grep " id " | get_field 2)
 
     # The Member role is used by Horizon and Swift so we need to keep it:
-    MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2)
+    MEMBER_ROLE=$(openstack role create \
+        Member \
+        | grep " id " | get_field 2)
     # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-    ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2)
+    ANOTHER_ROLE=$(openstack role create \
+        anotherrole \
+        | grep " id " | get_field 2)
 
     # invisible tenant - admin can't see this one
-    INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2)
+    INVIS_TENANT=$(openstack project create \
+        invisible_to_admin \
+        | grep " id " | get_field 2)
 
     # demo
-    DEMO_TENANT=$(keystone tenant-create \
-        --name=demo \
+    DEMO_TENANT=$(openstack project create \
+        demo \
         | grep " id " | get_field 2)
-    DEMO_USER=$(keystone user-create \
-        --name demo \
-        --pass "$ADMIN_PASSWORD" \
+    DEMO_USER=$(openstack user create \
+        demo \
+        --project $DEMO_TENANT \
         --email demo@example.com \
+        --password "$ADMIN_PASSWORD" \
         | grep " id " | get_field 2)
-    keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT
-    keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT
-    keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT
-    keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT
+
+    openstack role add --project $DEMO_TENANT --user $DEMO_USER $MEMBER_ROLE
+    openstack role add --project $DEMO_TENANT --user $ADMIN_USER $ADMIN_ROLE
+    openstack role add --project $DEMO_TENANT --user $DEMO_USER $ANOTHER_ROLE
+    openstack role add --project $INVIS_TENANT --user $DEMO_USER $MEMBER_ROLE
 
     # Keystone
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        KEYSTONE_SERVICE=$(keystone service-create \
-            --name keystone \
+        KEYSTONE_SERVICE=$(openstack service create \
+            keystone \
             --type identity \
             --description "Keystone Identity Service" \
             | grep " id " | get_field 2)
-        keystone endpoint-create \
+        openstack endpoint create \
+            $KEYSTONE_SERVICE \
             --region RegionOne \
-            --service_id $KEYSTONE_SERVICE \
             --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \
             --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \
             --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION"
@@ -337,14 +348,14 @@
 
 # Configure the API version for the OpenStack projects.
 # configure_API_version conf_file version
-function configure_API_version() {
+function configure_API_version {
     local conf_file=$1
     local api_version=$2
     iniset $conf_file keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$api_version
 }
 
 # init_keystone() - Initialize databases, etc.
-function init_keystone() {
+function init_keystone {
     if is_service_enabled ldap; then
         init_ldap
     fi
@@ -368,14 +379,14 @@
 }
 
 # install_keystoneclient() - Collect source and prepare
-function install_keystoneclient() {
+function install_keystoneclient {
     git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
     setup_develop $KEYSTONECLIENT_DIR
     sudo install -D -m 0644 -o $STACK_USER {$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion
 }
 
 # install_keystone() - Collect source and prepare
-function install_keystone() {
+function install_keystone {
     # only install ldap if the service has been enabled
     if is_service_enabled ldap; then
         install_ldap
@@ -399,7 +410,7 @@
 }
 
 # start_keystone() - Start running processes, including screen
-function start_keystone() {
+function start_keystone {
     # Get right service port for testing
     local service_port=$KEYSTONE_SERVICE_PORT
     if is_service_enabled tls-proxy; then
@@ -415,7 +426,7 @@
     fi
 
     echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -k -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
         die $LINENO "keystone did not start"
     fi
 
@@ -427,7 +438,7 @@
 }
 
 # stop_keystone() - Stop running processes
-function stop_keystone() {
+function stop_keystone {
     # Kill the Keystone screen window
     screen_stop key
 }
diff --git a/lib/ldap b/lib/ldap
index e4bd416..51d0251 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -49,7 +49,7 @@
 
 # Perform common variable substitutions on the data files
 # _ldap_varsubst file
-function _ldap_varsubst() {
+function _ldap_varsubst {
     local infile=$1
     sed -e "
         s|\${LDAP_OLCDB_NUMBER}|$LDAP_OLCDB_NUMBER|
@@ -62,7 +62,7 @@
 }
 
 # clean_ldap() - Remove ldap server
-function cleanup_ldap() {
+function cleanup_ldap {
     uninstall_package $(get_packages ldap)
     if is_ubuntu; then
         uninstall_package slapd ldap-utils libslp1
@@ -76,7 +76,7 @@
 
 # init_ldap
 # init_ldap() - Initialize databases, etc.
-function init_ldap() {
+function init_ldap {
     local keystone_ldif
 
     TMP_LDAP_DIR=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
@@ -106,7 +106,7 @@
 
 # install_ldap
 # install_ldap() - Collect source and prepare
-function install_ldap() {
+function install_ldap {
     echo "Installing LDAP inside function"
     echo "os_VENDOR is $os_VENDOR"
 
@@ -143,17 +143,17 @@
 }
 
 # start_ldap() - Start LDAP
-function start_ldap() {
+function start_ldap {
     sudo service $LDAP_SERVICE_NAME restart
 }
 
 # stop_ldap() - Stop LDAP
-function stop_ldap() {
+function stop_ldap {
     sudo service $LDAP_SERVICE_NAME stop
 }
 
 # clear_ldap_state() - Clear LDAP State
-function clear_ldap_state() {
+function clear_ldap_state {
     ldapdelete -x -w $LDAP_PASSWORD -D "$LDAP_MANAGER_DN" -H $LDAP_URL -r "$LDAP_BASE_DN"
 }
 
diff --git a/lib/marconi b/lib/marconi
index 1eaebbd..29ae386 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -2,7 +2,8 @@
 # Install and start **Marconi** service
 
 # To enable a minimal set of Marconi services, add the following to localrc:
-#   enable_service marconi-server
+#
+#     enable_service marconi-server
 #
 # Dependencies:
 # - functions
@@ -51,6 +52,11 @@
 MARCONICLIENT_REPO=${MARCONICLIENT_REPO:-${GIT_BASE}/openstack/python-marconiclient.git}
 MARCONICLIENT_BRANCH=${MARCONICLIENT_BRANCH:-master}
 
+# Set Marconi Connection Info
+MARCONI_SERVICE_HOST=${MARCONI_SERVICE_HOST:-$SERVICE_HOST}
+MARCONI_SERVICE_PORT=${MARCONI_SERVICE_PORT:-8888}
+MARCONI_SERVICE_PROTOCOL=${MARCONI_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,marconi
 
@@ -58,19 +64,28 @@
 # Functions
 # ---------
 
+# Test if any Marconi services are enabled
+# is_marconi_enabled
+function is_marconi_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"marconi-" ]] && return 0
+    return 1
+}
+
 # cleanup_marconi() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_marconi() {
-    mongo marconi --eval "db.dropDatabase();"
+function cleanup_marconi {
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo marconi --eval 'db.dropDatabase();'; do sleep 1; done"; then
+        die $LINENO "Mongo DB did not start"
+    fi
 }
 
 # configure_marconiclient() - Set config files, create data dirs, etc
-function configure_marconiclient() {
+function configure_marconiclient {
     setup_develop $MARCONICLIENT_DIR
 }
 
 # configure_marconi() - Set config files, create data dirs, etc
-function configure_marconi() {
+function configure_marconi {
     setup_develop $MARCONI_DIR
 
     [ ! -d $MARCONI_CONF_DIR ] && sudo mkdir -m 755 -p $MARCONI_CONF_DIR
@@ -80,11 +95,8 @@
     sudo chown $USER $MARCONI_API_LOG_DIR
 
     iniset $MARCONI_CONF DEFAULT verbose True
-    iniset $MARCONI_CONF 'drivers:transport:wsgi' bind '0.0.0.0'
-
-    # Install the policy file for the API server
-    cp $MARCONI_DIR/etc/marconi/policy.json $MARCONI_CONF_DIR
-    iniset $MARCONI_CONF DEFAULT policy_file $MARCONI_CONF_DIR/policy.json
+    iniset $MARCONI_CONF DEFAULT use_syslog $SYSLOG
+    iniset $MARCONI_CONF 'drivers:transport:wsgi' bind $MARCONI_SERVICE_HOST
 
     iniset $MARCONI_CONF keystone_authtoken auth_protocol http
     iniset $MARCONI_CONF keystone_authtoken admin_user marconi
@@ -99,16 +111,23 @@
     fi
 }
 
-function configure_mongodb() {
+function configure_mongodb {
     # Set nssize to 2GB. This increases the number of namespaces supported
     # # per database.
-    sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
-
-    restart_service mongod
+    if is_ubuntu; then
+        sudo sed -i -e "
+            s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1|
+            s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047|
+        " /etc/mongodb.conf
+        restart_service mongodb
+    elif is_fedora; then
+        sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod
+        restart_service mongod
+    fi
 }
 
 # init_marconi() - Initialize etc.
-function init_marconi() {
+function init_marconi {
     # Create cache dir
     sudo mkdir -p $MARCONI_AUTH_CACHE_DIR
     sudo chown $STACK_USER $MARCONI_AUTH_CACHE_DIR
@@ -116,53 +135,61 @@
 }
 
 # install_marconi() - Collect source and prepare
-function install_marconi() {
+function install_marconi {
     git_clone $MARCONI_REPO $MARCONI_DIR $MARCONI_BRANCH
     setup_develop $MARCONI_DIR
 }
 
 # install_marconiclient() - Collect source and prepare
-function install_marconiclient() {
+function install_marconiclient {
     git_clone $MARCONICLIENT_REPO $MARCONICLIENT_DIR $MARCONICLIENT_BRANCH
     setup_develop $MARCONICLIENT_DIR
 }
 
 # start_marconi() - Start running processes, including screen
-function start_marconi() {
+function start_marconi {
     screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
+    echo "Waiting for Marconi to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
+        die $LINENO "Marconi did not start"
+    fi
 }
 
 # stop_marconi() - Stop running processes
-function stop_marconi() {
+function stop_marconi {
     # Kill the marconi screen windows
     for serv in marconi-server; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
 }
 
-function create_marconi_accounts() {
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+function create_marconi_accounts {
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    MARCONI_USER=$(get_id keystone user-create --name=marconi \
-                                                --pass="$SERVICE_PASSWORD" \
-                                                --tenant-id $SERVICE_TENANT \
-                                                --email=marconi@example.com)
-    keystone user-role-add --tenant-id $SERVICE_TENANT \
-                            --user-id $MARCONI_USER \
-                            --role-id $ADMIN_ROLE
+    MARCONI_USER=$(openstack user create \
+        marconi \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email marconi@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $MARCONI_USER
+
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        MARCONI_SERVICE=$(keystone service-create \
-            --name=marconi \
+        MARCONI_SERVICE=$(openstack service create \
+            marconi \
             --type=queuing \
             --description="Marconi Service" \
             | grep " id " | get_field 2)
-        keystone endpoint-create \
+        openstack endpoint create \
+            $MARCONI_SERVICE \
             --region RegionOne \
-            --service_id $MARCONI_SERVICE \
-            --publicurl "http://$SERVICE_HOST:8888" \
-            --adminurl "http://$SERVICE_HOST:8888" \
-            --internalurl "http://$SERVICE_HOST:8888"
+            --publicurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \
+            --adminurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT" \
+            --internalurl "$MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT"
     fi
 
 }
diff --git a/lib/neutron b/lib/neutron
index 81db2a7..7ca66a5 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -59,10 +59,6 @@
 # LinuxBridge plugin, please see the top level README file under the
 # Neutron section.
 
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
 
 # Neutron Network Configuration
 # -----------------------------
@@ -127,82 +123,81 @@
 # See _configure_neutron_common() for details about setting it up
 declare -a Q_PLUGIN_EXTRA_CONF_FILES
 
-if is_service_enabled neutron; then
-    Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-        Q_RR_COMMAND="sudo"
-    else
-        NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
-        Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
-    fi
 
-    # Provider Network Configurations
-    # --------------------------------
-
-    # The following variables control the Neutron openvswitch and
-    # linuxbridge plugins' allocation of tenant networks and
-    # availability of provider networks. If these are not configured
-    # in ``localrc``, tenant networks will be local to the host (with no
-    # remote connectivity), and no physical resources will be
-    # available for the allocation of provider networks.
-
-    # To use GRE tunnels for tenant networks, set to True in
-    # ``localrc``. GRE tunnels are only supported by the openvswitch
-    # plugin, and currently only on Ubuntu.
-    ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
-
-    # If using GRE tunnels for tenant networks, specify the range of
-    # tunnel IDs from which tenant networks are allocated. Can be
-    # overriden in ``localrc`` in necesssary.
-    TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000}
-
-    # To use VLANs for tenant networks, set to True in localrc. VLANs
-    # are supported by the openvswitch and linuxbridge plugins, each
-    # requiring additional configuration described below.
-    ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
-
-    # If using VLANs for tenant networks, set in ``localrc`` to specify
-    # the range of VLAN VIDs from which tenant networks are
-    # allocated. An external network switch must be configured to
-    # trunk these VLANs between hosts for multi-host connectivity.
-    #
-    # Example: ``TENANT_VLAN_RANGE=1000:1999``
-    TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-
-    # If using VLANs for tenant networks, or if using flat or VLAN
-    # provider networks, set in ``localrc`` to the name of the physical
-    # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
-    # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
-    # agent, as described below.
-    #
-    # Example: ``PHYSICAL_NETWORK=default``
-    PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
-
-    # With the openvswitch plugin, if using VLANs for tenant networks,
-    # or if using flat or VLAN provider networks, set in ``localrc`` to
-    # the name of the OVS bridge to use for the physical network. The
-    # bridge will be created if it does not already exist, but a
-    # physical interface must be manually added to the bridge as a
-    # port for external connectivity.
-    #
-    # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
-    OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
-
-    # With the linuxbridge plugin, if using VLANs for tenant networks,
-    # or if using flat or VLAN provider networks, set in ``localrc`` to
-    # the name of the network interface to use for the physical
-    # network.
-    #
-    # Example: ``LB_PHYSICAL_INTERFACE=eth1``
-    LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
-
-    # With the openvswitch plugin, set to True in ``localrc`` to enable
-    # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
-    #
-    # Example: ``OVS_ENABLE_TUNNELING=True``
-    OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
+if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+    Q_RR_COMMAND="sudo"
+else
+    NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
+    Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
 fi
 
+# Provider Network Configurations
+# --------------------------------
+
+# The following variables control the Neutron openvswitch and
+# linuxbridge plugins' allocation of tenant networks and
+# availability of provider networks. If these are not configured
+# in ``localrc``, tenant networks will be local to the host (with no
+# remote connectivity), and no physical resources will be
+# available for the allocation of provider networks.
+
+# To use GRE tunnels for tenant networks, set to True in
+# ``localrc``. GRE tunnels are only supported by the openvswitch
+# plugin, and currently only on Ubuntu.
+ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
+
+# If using GRE tunnels for tenant networks, specify the range of
+# tunnel IDs from which tenant networks are allocated. Can be
+# overridden in ``localrc`` if necessary.
+TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000}
+
+# To use VLANs for tenant networks, set to True in localrc. VLANs
+# are supported by the openvswitch and linuxbridge plugins, each
+# requiring additional configuration described below.
+ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+# If using VLANs for tenant networks, set in ``localrc`` to specify
+# the range of VLAN VIDs from which tenant networks are
+# allocated. An external network switch must be configured to
+# trunk these VLANs between hosts for multi-host connectivity.
+#
+# Example: ``TENANT_VLAN_RANGE=1000:1999``
+TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+# If using VLANs for tenant networks, or if using flat or VLAN
+# provider networks, set in ``localrc`` to the name of the physical
+# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
+# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
+# agent, as described below.
+#
+# Example: ``PHYSICAL_NETWORK=default``
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+
+# With the openvswitch plugin, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the OVS bridge to use for the physical network. The
+# bridge will be created if it does not already exist, but a
+# physical interface must be manually added to the bridge as a
+# port for external connectivity.
+#
+# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
+OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
+
+# With the linuxbridge plugin, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the network interface to use for the physical
+# network.
+#
+# Example: ``LB_PHYSICAL_INTERFACE=eth1``
+LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+
+# With the openvswitch plugin, set to True in ``localrc`` to enable
+# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
+#
+# Example: ``OVS_ENABLE_TUNNELING=True``
+OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+
 # Neutron plugin specific functions
 # ---------------------------------
 
@@ -241,12 +236,24 @@
 TEMPEST_SERVICES+=,neutron
 
 
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
 # Functions
 # ---------
 
+# Test if any Neutron services are enabled
+# is_neutron_enabled
+function is_neutron_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0
+    return 1
+}
+
 # configure_neutron()
 # Set common config for all neutron server and agents.
-function configure_neutron() {
+function configure_neutron {
     _configure_neutron_common
     iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT
 
@@ -282,7 +289,7 @@
     _configure_neutron_debug_command
 }
 
-function create_nova_conf_neutron() {
+function create_nova_conf_neutron {
     iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API"
     iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME"
     iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD"
@@ -309,7 +316,7 @@
 }
 
 # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
-function create_neutron_cache_dir() {
+function create_neutron_cache_dir {
     # Create cache dir
     sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR
     sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR
@@ -323,31 +330,31 @@
 # service              neutron    admin        # if enabled
 
 # Migrated from keystone_data.sh
-function create_neutron_accounts() {
+function create_neutron_accounts {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-        NEUTRON_USER=$(keystone user-create \
-            --name=neutron \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=neutron@example.com \
+        NEUTRON_USER=$(openstack user create \
+            neutron \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email neutron@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $NEUTRON_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $NEUTRON_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            NEUTRON_SERVICE=$(keystone service-create \
-                --name=neutron \
+            NEUTRON_SERVICE=$(openstack service create \
+                neutron \
                 --type=network \
                 --description="Neutron Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $NEUTRON_SERVICE \
                 --region RegionOne \
-                --service_id $NEUTRON_SERVICE \
                 --publicurl "http://$SERVICE_HOST:9696/" \
                 --adminurl "http://$SERVICE_HOST:9696/" \
                 --internalurl "http://$SERVICE_HOST:9696/"
@@ -355,8 +362,8 @@
     fi
 }
 
-function create_neutron_initial_network() {
-    TENANT_ID=$(keystone tenant-list | grep " demo " | get_field 1)
+function create_neutron_initial_network {
+    TENANT_ID=$(openstack project list | grep " demo " | get_field 1)
     die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo"
 
     # Create a small network
@@ -422,27 +429,27 @@
 }
 
 # init_neutron() - Initialize databases, etc.
-function init_neutron() {
+function init_neutron {
     recreate_database $Q_DB_NAME utf8
     # Run Neutron db migrations
     $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
 }
 
 # install_neutron() - Collect source and prepare
-function install_neutron() {
+function install_neutron {
     git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
     setup_develop $NEUTRON_DIR
 }
 
 # install_neutronclient() - Collect source and prepare
-function install_neutronclient() {
+function install_neutronclient {
     git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH
     setup_develop $NEUTRONCLIENT_DIR
     sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion
 }
 
 # install_neutron_agent_packages() - Collect source and prepare
-function install_neutron_agent_packages() {
+function install_neutron_agent_packages {
     # install packages that are specific to plugin agent(s)
     if is_service_enabled q-agt q-dhcp q-l3; then
         neutron_plugin_install_agent_packages
@@ -454,7 +461,7 @@
 }
 
 # Start running processes, including screen
-function start_neutron_service_and_check() {
+function start_neutron_service_and_check {
     # build config-file options
     local cfg_file
     local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
@@ -470,7 +477,7 @@
 }
 
 # Start running processes, including screen
-function start_neutron_agents() {
+function start_neutron_agents {
     # Start up the neutron agents if enabled
     screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
     screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
@@ -503,7 +510,7 @@
 }
 
 # stop_neutron() - Stop running processes (non-screen)
-function stop_neutron() {
+function stop_neutron {
     if is_service_enabled q-dhcp; then
         pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
         [ ! -z "$pid" ] && sudo kill -9 $pid
@@ -528,7 +535,7 @@
 
 # cleanup_neutron() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_neutron() {
+function cleanup_neutron {
     if is_neutron_ovs_base_plugin; then
         neutron_ovs_base_cleanup
     fi
@@ -542,7 +549,7 @@
 # _configure_neutron_common()
 # Set common config for all neutron server and agents.
 # This MUST be called before other ``_configure_neutron_*`` functions.
-function _configure_neutron_common() {
+function _configure_neutron_common {
     # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
     if [[ ! -d $NEUTRON_CONF_DIR ]]; then
         sudo mkdir -p $NEUTRON_CONF_DIR
@@ -604,7 +611,7 @@
     _neutron_setup_rootwrap
 }
 
-function _configure_neutron_debug_command() {
+function _configure_neutron_debug_command {
     if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then
         return
     fi
@@ -621,7 +628,7 @@
     neutron_plugin_configure_debug_command
 }
 
-function _configure_neutron_dhcp_agent() {
+function _configure_neutron_dhcp_agent {
     AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
     Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
 
@@ -645,7 +652,7 @@
     neutron_plugin_configure_dhcp_agent
 }
 
-function _configure_neutron_l3_agent() {
+function _configure_neutron_l3_agent {
     Q_L3_ENABLED=True
     # for l3-agent, only use per tenant router if we have namespaces
     Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
@@ -669,7 +676,7 @@
     neutron_plugin_configure_l3_agent
 }
 
-function _configure_neutron_metadata_agent() {
+function _configure_neutron_metadata_agent {
     AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
     Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
 
@@ -684,30 +691,29 @@
 
 }
 
-function _configure_neutron_lbaas() {
+function _configure_neutron_lbaas {
     neutron_agent_lbaas_configure_common
     neutron_agent_lbaas_configure_agent
 }
 
-function _configure_neutron_metering() {
+function _configure_neutron_metering {
     neutron_agent_metering_configure_common
     neutron_agent_metering_configure_agent
 }
 
-function _configure_neutron_fwaas() {
+function _configure_neutron_fwaas {
     neutron_fwaas_configure_common
     neutron_fwaas_configure_driver
 }
 
-function _configure_neutron_vpn()
-{
+function _configure_neutron_vpn {
     neutron_vpn_install_agent_packages
     neutron_vpn_configure_common
 }
 
 # _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
 # It is called when q-agt is enabled.
-function _configure_neutron_plugin_agent() {
+function _configure_neutron_plugin_agent {
     # Specify the default root helper prior to agent configuration to
     # ensure that an agent's configuration can override the default
     iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
@@ -720,7 +726,7 @@
 
 # _configure_neutron_service() - Set config files for neutron service
 # It is called when q-svc is enabled.
-function _configure_neutron_service() {
+function _configure_neutron_service {
     Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
     Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
 
@@ -758,7 +764,7 @@
 #------------------
 
 # _neutron_service_plugin_class_add() - add service plugin class
-function _neutron_service_plugin_class_add() {
+function _neutron_service_plugin_class_add {
     local service_plugin_class=$1
     if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
         Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
@@ -768,7 +774,7 @@
 }
 
 # _neutron_setup_rootwrap() - configure Neutron's rootwrap
-function _neutron_setup_rootwrap() {
+function _neutron_setup_rootwrap {
     if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
         return
     fi
@@ -808,7 +814,7 @@
 }
 
 # Configures keystone integration for neutron service and agents
-function _neutron_setup_keystone() {
+function _neutron_setup_keystone {
     local conf_file=$1
     local section=$2
     local use_auth_url=$3
@@ -835,7 +841,7 @@
     fi
 }
 
-function _neutron_setup_interface_driver() {
+function _neutron_setup_interface_driver {
 
     # ovs_use_veth needs to be set before the plugin configuration
     # occurs to allow plugins to override the setting.
@@ -847,14 +853,14 @@
 # Functions for Neutron Exercises
 #--------------------------------
 
-function delete_probe() {
+function delete_probe {
     local from_net="$1"
     net_id=`_get_net_id $from_net`
     probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
     neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
 }
 
-function setup_neutron_debug() {
+function setup_neutron_debug {
     if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
         public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME`
         neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $public_net_id
@@ -863,23 +869,23 @@
     fi
 }
 
-function teardown_neutron_debug() {
+function teardown_neutron_debug {
     delete_probe $PUBLIC_NETWORK_NAME
     delete_probe $PRIVATE_NETWORK_NAME
 }
 
-function _get_net_id() {
+function _get_net_id {
     neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}'
 }
 
-function _get_probe_cmd_prefix() {
+function _get_probe_cmd_prefix {
     local from_net="$1"
     net_id=`_get_net_id $from_net`
     probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
     echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
 }
 
-function _ping_check_neutron() {
+function _ping_check_neutron {
     local from_net=$1
     local ip=$2
     local timeout_sec=$3
@@ -901,7 +907,7 @@
 }
 
 # ssh check
-function _ssh_check_neutron() {
+function _ssh_check_neutron {
     local from_net=$1
     local key_file=$2
     local ip=$3
@@ -927,39 +933,39 @@
     fi
 done
 
-function _neutron_third_party_do() {
+function _neutron_third_party_do {
     for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do
         ${1}_${third_party}
     done
 }
 
 # configure_neutron_third_party() - Set config files, create data dirs, etc
-function configure_neutron_third_party() {
+function configure_neutron_third_party {
     _neutron_third_party_do configure
 }
 
 # init_neutron_third_party() - Initialize databases, etc.
-function init_neutron_third_party() {
+function init_neutron_third_party {
     _neutron_third_party_do init
 }
 
 # install_neutron_third_party() - Collect source and prepare
-function install_neutron_third_party() {
+function install_neutron_third_party {
     _neutron_third_party_do install
 }
 
 # start_neutron_third_party() - Start running processes, including screen
-function start_neutron_third_party() {
+function start_neutron_third_party {
     _neutron_third_party_do start
 }
 
 # stop_neutron_third_party - Stop running processes (non-screen)
-function stop_neutron_third_party() {
+function stop_neutron_third_party {
     _neutron_third_party_do stop
 }
 
 # check_neutron_third_party_integration() - Check that third party integration is sane
-function check_neutron_third_party_integration() {
+function check_neutron_third_party_integration {
     _neutron_third_party_do check
 }
 
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 93ec497..4cb0da8 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -8,15 +8,15 @@
 source $TOP_DIR/lib/neutron_plugins/ovs_base
 source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight     # for third party service specific configuration values
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     :
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     _neutron_ovs_base_install_agent_packages
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/bigswitch
     Q_PLUGIN_CONF_FILENAME=restproxy.ini
     Q_DB_NAME="restproxy_neutron"
@@ -25,35 +25,33 @@
     BS_FL_CONTROLLER_TIMEOUT=${BS_FL_CONTROLLER_TIMEOUT:-10}
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     _neutron_ovs_base_configure_debug_command
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     :
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     _neutron_ovs_base_configure_l3_agent
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     :
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT
     iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT
-    if [ "$BS_FL_VIF_DRIVER" = "ivs" ]
-    then
+    if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then
         iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs
     fi
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    if [ "$BS_FL_VIF_DRIVER" = "ivs" ]
-    then
+    if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then
         iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver
     else
         iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
@@ -61,12 +59,12 @@
 }
 
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 1 means False here
     return 1
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index 8e18d04..4443fa7 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -5,53 +5,53 @@
 BRCD_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     return 1
 }
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     install_package bridge-utils
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/brocade
     Q_PLUGIN_CONF_FILENAME=brocade.ini
     Q_DB_NAME="brcd_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.brocade.NeutronPlugin.BrocadePluginV2"
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
     iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent"
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index 8948be6..7728eb1 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -27,12 +27,12 @@
 NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
 
 # This routine put a prefix on an existing function name
-function _prefix_function() {
+function _prefix_function {
     declare -F $1 > /dev/null || die "$1 doesn't exist"
     eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)"
 }
 
-function _has_ovs_subplugin() {
+function _has_ovs_subplugin {
     local subplugin
     for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
         if [[ "$subplugin" == "openvswitch" ]]; then
@@ -42,7 +42,7 @@
     return 1
 }
 
-function _has_nexus_subplugin() {
+function _has_nexus_subplugin {
     local subplugin
     for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
         if [[ "$subplugin" == "nexus" ]]; then
@@ -52,7 +52,7 @@
     return 1
 }
 
-function _has_n1kv_subplugin() {
+function _has_n1kv_subplugin {
     local subplugin
     for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
         if [[ "$subplugin" == "n1kv" ]]; then
@@ -64,7 +64,7 @@
 
 # This routine populates the cisco config file with the information for
 # a particular nexus switch
-function _config_switch() {
+function _config_switch {
     local cisco_cfg_file=$1
     local switch_ip=$2
     local username=$3
@@ -99,7 +99,7 @@
 _prefix_function has_neutron_plugin_security_group ovs
 
 # Check the version of the installed ncclient package
-function check_ncclient_version() {
+function check_ncclient_version {
 python << EOF
 version = '$NCCLIENT_VERSION'
 import sys
@@ -115,13 +115,13 @@
 }
 
 # Install the ncclient package
-function install_ncclient() {
+function install_ncclient {
     git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH
     (cd $NCCLIENT_DIR; sudo python setup.py install)
 }
 
 # Check if the required version of ncclient has been installed
-function is_ncclient_installed() {
+function is_ncclient_installed {
     # Check if the Cisco ncclient repository exists
     if [[ -d $NCCLIENT_DIR ]]; then
         remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}')
@@ -144,7 +144,7 @@
     return 0
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     if _has_ovs_subplugin; then
         ovs_has_neutron_plugin_security_group
     else
@@ -152,14 +152,14 @@
     fi
 }
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     # Cisco uses OVS if openvswitch subplugin is deployed
     _has_ovs_subplugin
     return
 }
 
 # populate required nova configuration parameters
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     if _has_ovs_subplugin; then
         ovs_neutron_plugin_create_nova_conf
     else
@@ -167,13 +167,13 @@
     fi
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     # Cisco plugin uses openvswitch to operate in one of its configurations
     ovs_neutron_plugin_install_agent_packages
 }
 
 # Configure common parameters
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     # setup default subplugins
     if [ ! -v Q_CISCO_PLUGIN_SUBPLUGINS ]; then
         declare -ga Q_CISCO_PLUGIN_SUBPLUGINS
@@ -191,23 +191,23 @@
     Q_DB_NAME=cisco_neutron
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     if _has_ovs_subplugin; then
         ovs_neutron_plugin_configure_debug_command
     fi
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     if _has_ovs_subplugin; then
         ovs_neutron_plugin_configure_l3_agent
     fi
 }
 
-function _configure_nexus_subplugin() {
+function _configure_nexus_subplugin {
     local cisco_cfg_file=$1
 
     # Install a known compatible ncclient from the Cisco repository if necessary
@@ -252,7 +252,7 @@
 }
 
 # Configure n1kv plugin
-function _configure_n1kv_subplugin() {
+function _configure_n1kv_subplugin {
     local cisco_cfg_file=$1
 
     # populate the cisco plugin cfg file with the VSM information
@@ -270,13 +270,13 @@
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     if _has_ovs_subplugin; then
         ovs_neutron_plugin_configure_plugin_agent
     fi
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     local subplugin
     local cisco_cfg_file
 
@@ -318,7 +318,7 @@
     fi
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
 }
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
new file mode 100644
index 0000000..62f9737
--- /dev/null
+++ b/lib/neutron_plugins/embrane
@@ -0,0 +1,40 @@
+# Neutron Embrane plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/openvswitch
+
+function save_function {
+    local ORIG_FUNC=$(declare -f $1)
+    local NEW_FUNC="$2${ORIG_FUNC#$1}"
+    eval "$NEW_FUNC"
+}
+
+save_function neutron_plugin_configure_service _neutron_plugin_configure_service
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/embrane
+    Q_PLUGIN_CONF_FILENAME=heleos_conf.ini
+    Q_DB_NAME="ovs_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.embrane.plugins.embrane_ovs_plugin.EmbraneOvsPlugin"
+}
+
+function neutron_plugin_configure_service {
+    _neutron_plugin_configure_service
+    iniset /$Q_PLUGIN_CONF_FILE heleos esm_mgmt $HELEOS_ESM_MGMT
+    iniset /$Q_PLUGIN_CONF_FILE heleos admin_username $HELEOS_ADMIN_USERNAME
+    iniset /$Q_PLUGIN_CONF_FILE heleos admin_password $HELEOS_ADMIN_PASSWORD
+    iniset /$Q_PLUGIN_CONF_FILE heleos router_image $HELEOS_ROUTER_IMAGE
+    iniset /$Q_PLUGIN_CONF_FILE heleos mgmt_id $HELEOS_MGMT_ID
+    iniset /$Q_PLUGIN_CONF_FILE heleos inband_id $HELEOS_INBAND_ID
+    iniset /$Q_PLUGIN_CONF_FILE heleos oob_id $HELEOS_OOB_ID
+    iniset /$Q_PLUGIN_CONF_FILE heleos dummy_utif_id $HELEOS_DUMMY_UTIF_ID
+    iniset /$Q_PLUGIN_CONF_FILE heleos resource_pool_id $HELEOS_RESOURCE_POOL_ID
+    iniset /$Q_PLUGIN_CONF_FILE heleos async_requests $HELEOS_ASYNC_REQUESTS
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge
index 37bc748..362fd5b 100644
--- a/lib/neutron_plugins/linuxbridge
+++ b/lib/neutron_plugins/linuxbridge
@@ -7,14 +7,14 @@
 
 source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/linuxbridge
     Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
     Q_DB_NAME="neutron_linux_bridge"
     Q_PLUGIN_CLASS="neutron.plugins.linuxbridge.lb_neutron_plugin.LinuxBridgePluginV2"
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan
     else
@@ -47,7 +47,7 @@
     done
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 85e8c08..74799e4 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -5,33 +5,33 @@
 PLUGIN_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     # linuxbridge doesn't use OVS
     return 1
 }
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     :
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     install_package bridge-utils
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
     iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     # Setup physical network interface mappings.  Override
     # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
     # complex physical network configurations.
@@ -63,12 +63,12 @@
     done
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index f95fcb7..742e3b2 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -1,36 +1,40 @@
 # Neutron MidoNet plugin
 # ----------------------
 
+MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
+MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
+MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
+
 # Save trace setting
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     # MidoNet does not use l3-agent
     # 0 means True here
     return 1
 }
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     :
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
     Q_PLUGIN_CONF_FILENAME=midonet.ini
     Q_DB_NAME="neutron_midonet"
     Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     :
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"}
     neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE
     iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER
@@ -38,17 +42,17 @@
     iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     die $LINENO "q-l3 must not be executed with MidoNet plugin!"
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     die $LINENO "q-agt must not be executed with MidoNet plugin!"
 }
 
-function neutron_plugin_configure_service() {
-    if [[ "$MIDONET_API_URI" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI
+function neutron_plugin_configure_service {
+    if [[ "$MIDONET_API_URL" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URL
     fi
     if [[ "$MIDONET_USERNAME" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME
@@ -59,25 +63,22 @@
     if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID
     fi
-    if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
-    fi
 
     Q_L3_ENABLED=True
     Q_L3_ROUTER_PER_TENANT=True
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     # 0 means True here
     return 1
 }
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index ab4e347..e985dcb 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -33,7 +33,7 @@
 # L3 Plugin to load for ML2
 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
 
-function populate_ml2_config() {
+function populate_ml2_config {
     CONF=$1
     SECTION=$2
     OPTS=$3
@@ -47,7 +47,7 @@
     done
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2
     Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
     Q_DB_NAME="neutron_ml2"
@@ -57,7 +57,7 @@
     _neutron_service_plugin_class_add $ML2_L3_PLUGIN
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
         Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
     elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
@@ -93,9 +93,9 @@
     # instead use its own config variable to indicate whether security
     # groups is enabled, and that will need to be set here instead.
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.not.a.real.FirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver
     else
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
 
     # Since we enable the tunnel TypeDrivers, also enable a local_ip
@@ -114,7 +114,7 @@
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_vlan $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     return 0
 }
 
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index d8d8b7c..6d4bfca 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -22,11 +22,11 @@
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     _neutron_ovs_base_configure_nova_vif_driver
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose
     # version is different from the version provided by the distribution.
     if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then
@@ -36,26 +36,26 @@
     _neutron_ovs_base_install_agent_packages
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec
     Q_PLUGIN_CONF_FILENAME=nec.ini
     Q_DB_NAME="neutron_nec"
     Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2"
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     _neutron_ovs_base_configure_debug_command
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     :
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     _neutron_ovs_base_configure_l3_agent
 }
 
-function _quantum_plugin_setup_bridge() {
+function _quantum_plugin_setup_bridge {
     if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then
         return
     fi
@@ -72,7 +72,7 @@
     _neutron_setup_ovs_tunnels $OVS_BRIDGE
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     _quantum_plugin_setup_bridge
 
     AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent"
@@ -80,7 +80,7 @@
     _neutron_ovs_base_configure_firewall_driver
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/
     iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST
     iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT
@@ -91,7 +91,7 @@
     _neutron_ovs_base_configure_firewall_driver
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
     iniset $conf_file DEFAULT ovs_use_veth True
@@ -101,13 +101,12 @@
 # ---------------------------
 
 # Setup OVS tunnel manually
-function _neutron_setup_ovs_tunnels() {
+function _neutron_setup_ovs_tunnels {
     local bridge=$1
     local id=0
     GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP}
     if [ -n "$GRE_REMOTE_IPS" ]; then
-        for ip in ${GRE_REMOTE_IPS//:/ }
-        do
+        for ip in ${GRE_REMOTE_IPS//:/ }; do
             if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then
                 continue
             fi
@@ -118,12 +117,12 @@
     fi
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch
index f99eb38..bdbc5a9 100644
--- a/lib/neutron_plugins/openvswitch
+++ b/lib/neutron_plugins/openvswitch
@@ -7,14 +7,14 @@
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch_agent
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/openvswitch
     Q_PLUGIN_CONF_FILENAME=ovs_neutron_plugin.ini
     Q_DB_NAME="ovs_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2"
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre
         iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES
@@ -52,7 +52,7 @@
     done
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     return 0
 }
 
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 46c2a5c..3a2bdc3 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -7,7 +7,7 @@
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     _neutron_ovs_base_configure_nova_vif_driver
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
@@ -17,24 +17,24 @@
     fi
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     _neutron_ovs_base_install_agent_packages
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     _neutron_ovs_base_configure_debug_command
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     _neutron_ovs_base_configure_l3_agent
     iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     # Setup integration bridge
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
     _neutron_ovs_base_configure_firewall_driver
@@ -118,12 +118,12 @@
     done
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 89db29d..0a2ba58 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -8,19 +8,19 @@
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     # Yes, we use OVS.
     return 0
 }
 
-function _neutron_ovs_base_setup_bridge() {
+function _neutron_ovs_base_setup_bridge {
     local bridge=$1
     neutron-ovs-cleanup
     sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge
     sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
 
-function neutron_ovs_base_cleanup() {
+function neutron_ovs_base_cleanup {
     # remove all OVS ports that look like Neutron created ports
     for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
         sudo ovs-vsctl del-port ${port}
@@ -32,7 +32,7 @@
     done
 }
 
-function _neutron_ovs_base_install_agent_packages() {
+function _neutron_ovs_base_install_agent_packages {
     local kernel_version
     # Install deps
     # FIXME add to ``files/apts/neutron``, but don't install if not needed!
@@ -50,11 +50,11 @@
     fi
 }
 
-function _neutron_ovs_base_configure_debug_command() {
+function _neutron_ovs_base_configure_debug_command {
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
 }
 
-function _neutron_ovs_base_configure_firewall_driver() {
+function _neutron_ovs_base_configure_firewall_driver {
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
     else
@@ -62,7 +62,7 @@
     fi
 }
 
-function _neutron_ovs_base_configure_l3_agent() {
+function _neutron_ovs_base_configure_l3_agent {
     iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
 
     neutron-ovs-cleanup
@@ -72,7 +72,7 @@
     sudo ip addr flush dev $PUBLIC_BRIDGE
 }
 
-function _neutron_ovs_base_configure_nova_vif_driver() {
+function _neutron_ovs_base_configure_nova_vif_driver {
     :
 }
 
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index bccd301..19f94cb 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -6,15 +6,15 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     :
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     :
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
     Q_PLUGIN_CONF_FILENAME=plumgrid.ini
     Q_DB_NAME="plumgrid_neutron"
@@ -26,7 +26,7 @@
     PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server $PLUMGRID_DIRECTOR_IP
     iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector director_server_port $PLUMGRID_DIRECTOR_PORT
     iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector username $PLUMGRID_ADMIN
@@ -34,21 +34,21 @@
     iniset /$Q_PLUGIN_CONF_FILE PLUMgridDirector servertimeout $PLUMGRID_TIMEOUT
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     :
 }
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     # False
     return 1
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # False
     return 1
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 # Restore xtrace
diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu
index 334c227..9ae36d3 100644
--- a/lib/neutron_plugins/ryu
+++ b/lib/neutron_plugins/ryu
@@ -8,12 +8,12 @@
 source $TOP_DIR/lib/neutron_plugins/ovs_base
 source $TOP_DIR/lib/neutron_thirdparty/ryu      # for configuration value
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     _neutron_ovs_base_configure_nova_vif_driver
     iniset $NOVA_CONF DEFAULT libvirt_ovs_integration_bridge "$OVS_BRIDGE"
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     _neutron_ovs_base_install_agent_packages
 
     # neutron_ryu_agent requires ryu module
@@ -22,28 +22,28 @@
     configure_ryu
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ryu
     Q_PLUGIN_CONF_FILENAME=ryu.ini
     Q_DB_NAME="ovs_neutron"
     Q_PLUGIN_CLASS="neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2"
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     _neutron_ovs_base_configure_debug_command
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     iniset $Q_DHCP_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     iniset $Q_L3_CONF_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
     _neutron_ovs_base_configure_l3_agent
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     # Set up integration bridge
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
     if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
@@ -55,24 +55,24 @@
     _neutron_ovs_base_configure_firewall_driver
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
 
     _neutron_ovs_base_configure_firewall_driver
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
     iniset $conf_file DEFAULT ovs_use_veth True
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index 8273e54..ab6c324 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -7,11 +7,11 @@
 
 FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin
 
-function neutron_fwaas_configure_common() {
+function neutron_fwaas_configure_common {
     _neutron_service_plugin_class_add $FWAAS_PLUGIN
 }
 
-function neutron_fwaas_configure_driver() {
+function neutron_fwaas_configure_driver {
     FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini
     cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME
 
@@ -19,7 +19,7 @@
     iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver"
 }
 
-function neutron_fwaas_stop() {
+function neutron_fwaas_stop {
     :
 }
 
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 5d7a94e..531f52f 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -9,7 +9,7 @@
 AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
 LBAAS_PLUGIN=neutron.services.loadbalancer.plugin.LoadBalancerPlugin
 
-function neutron_agent_lbaas_install_agent_packages() {
+function neutron_agent_lbaas_install_agent_packages {
     if is_ubuntu || is_fedora; then
         install_package haproxy
     elif is_suse; then
@@ -18,11 +18,11 @@
     fi
 }
 
-function neutron_agent_lbaas_configure_common() {
+function neutron_agent_lbaas_configure_common {
     _neutron_service_plugin_class_add $LBAAS_PLUGIN
 }
 
-function neutron_agent_lbaas_configure_agent() {
+function neutron_agent_lbaas_configure_agent {
     LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
     mkdir -p $LBAAS_AGENT_CONF_PATH
 
@@ -38,10 +38,11 @@
 
     if is_fedora; then
         iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
+        iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
     fi
 }
 
-function neutron_lbaas_stop() {
+function neutron_lbaas_stop {
     pids=$(ps aux | awk '/haproxy/ { print $2 }')
     [ ! -z "$pids" ] && sudo kill $pids
 }
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 37952bb..0e5f75b 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -9,11 +9,11 @@
 AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent"
 METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin"
 
-function neutron_agent_metering_configure_common() {
+function neutron_agent_metering_configure_common {
     _neutron_service_plugin_class_add $METERING_PLUGIN
 }
 
-function neutron_agent_metering_configure_agent() {
+function neutron_agent_metering_configure_agent {
     METERING_AGENT_CONF_PATH=/etc/neutron/services/metering
     mkdir -p $METERING_AGENT_CONF_PATH
 
@@ -22,7 +22,7 @@
     cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME
 }
 
-function neutron_metering_stop() {
+function neutron_metering_stop {
     :
 }
 
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 02370e7..e56d361 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -10,15 +10,15 @@
 VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin"
 IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"}
 
-function neutron_vpn_install_agent_packages() {
+function neutron_vpn_install_agent_packages {
     install_package $IPSEC_PACKAGE
 }
 
-function neutron_vpn_configure_common() {
+function neutron_vpn_configure_common {
     _neutron_service_plugin_class_add $VPN_PLUGIN
 }
 
-function neutron_vpn_stop() {
+function neutron_vpn_stop {
     local ipsec_data_dir=$DATA_DIR/neutron/ipsec
     local pids
     if [ -d $ipsec_data_dir ]; then
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index 74f98df..fe79354 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -7,7 +7,7 @@
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
 
-function setup_integration_bridge() {
+function setup_integration_bridge {
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
     # Set manager to NSX controller (1st of list)
     if [[ "$NSX_CONTROLLERS" != "" ]]; then
@@ -20,53 +20,53 @@
     sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
 }
 
-function is_neutron_ovs_base_plugin() {
+function is_neutron_ovs_base_plugin {
     # NSX uses OVS, but not the l3-agent
     return 0
 }
 
-function neutron_plugin_create_nova_conf() {
+function neutron_plugin_create_nova_conf {
     # if n-cpu is enabled, then setup integration bridge
     if is_service_enabled n-cpu; then
         setup_integration_bridge
     fi
 }
 
-function neutron_plugin_install_agent_packages() {
+function neutron_plugin_install_agent_packages {
     # VMware NSX Plugin does not run q-agt, but it currently needs dhcp and metadata agents
     _neutron_ovs_base_install_agent_packages
 }
 
-function neutron_plugin_configure_common() {
+function neutron_plugin_configure_common {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/vmware
     Q_PLUGIN_CONF_FILENAME=nsx.ini
     Q_DB_NAME="neutron_nsx"
     Q_PLUGIN_CLASS="neutron.plugins.vmware.plugin.NsxPlugin"
 }
 
-function neutron_plugin_configure_debug_command() {
+function neutron_plugin_configure_debug_command {
     sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT external_network_bridge "$PUBLIC_BRIDGE"
 }
 
-function neutron_plugin_configure_dhcp_agent() {
+function neutron_plugin_configure_dhcp_agent {
     setup_integration_bridge
     iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
     iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network True
     iniset $Q_DHCP_CONF_FILE DEFAULT ovs_use_veth True
 }
 
-function neutron_plugin_configure_l3_agent() {
+function neutron_plugin_configure_l3_agent {
     # VMware NSX plugin does not run L3 agent
     die $LINENO "q-l3 should must not be executed with VMware NSX plugin!"
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function neutron_plugin_configure_plugin_agent {
     # VMware NSX plugin does not run L2 agent
     die $LINENO "q-agt must not be executed with VMware NSX plugin!"
 }
 
-function neutron_plugin_configure_service() {
+function neutron_plugin_configure_service {
     if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE nsx max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
     fi
@@ -131,17 +131,17 @@
     fi
 }
 
-function neutron_plugin_setup_interface_driver() {
+function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
 }
 
-function has_neutron_plugin_security_group() {
+function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
 
-function neutron_plugin_check_adv_test_requirements() {
+function neutron_plugin_check_adv_test_requirements {
     is_service_enabled q-dhcp && return 0
 }
 
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
index 1fd4fd8..f03de56 100644
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ b/lib/neutron_thirdparty/bigswitch_floodlight
@@ -8,11 +8,11 @@
 BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
 BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633}
 
-function configure_bigswitch_floodlight() {
+function configure_bigswitch_floodlight {
     :
 }
 
-function init_bigswitch_floodlight() {
+function init_bigswitch_floodlight {
     install_neutron_agent_packages
 
     echo -n "Installing OVS managed by the openflow controllers:"
@@ -24,8 +24,7 @@
     sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE}
 
     ctrls=
-    for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`
-    do
+    for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do
         ctrl=${ctrl%:*}
         ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}"
     done
@@ -33,19 +32,19 @@
     sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls}
 }
 
-function install_bigswitch_floodlight() {
+function install_bigswitch_floodlight {
     :
 }
 
-function start_bigswitch_floodlight() {
+function start_bigswitch_floodlight {
     :
 }
 
-function stop_bigswitch_floodlight() {
+function stop_bigswitch_floodlight {
     :
 }
 
-function check_bigswitch_floodlight() {
+function check_bigswitch_floodlight {
     :
 }
 
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index e672528..ad417bb 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -10,53 +10,38 @@
 
 # MidoNet devstack destination dir
 MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
-MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
-MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
 
 # MidoNet client repo
 MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
 MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
 MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
 
-# MidoNet OpenStack repo
-MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git}
-MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master}
-MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack}
-MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py}
-
 # Save trace setting
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-function configure_midonet() {
+function configure_midonet {
     :
 }
 
-function init_midonet() {
-
-    # Initialize DB.  Evaluate the output of setup_midonet_topology.py to set
-    # env variables for provider router ID.
-    eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices`
-    die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set."
-
-    iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id
+function init_midonet {
+    :
 }
 
-function install_midonet() {
+function install_midonet {
     git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH
-    git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH
-    export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH
+    export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$PYTHONPATH
 }
 
-function start_midonet() {
+function start_midonet {
     :
 }
 
-function stop_midonet() {
+function stop_midonet {
     :
 }
 
-function check_midonet() {
+function check_midonet {
     :
 }
 
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 5edf273..424a900 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -21,14 +21,14 @@
 # configure_ryu can be called multiple times as neutron_pluing/ryu may call
 # this function for neutron-ryu-agent
 _RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
-function configure_ryu() {
+function configure_ryu {
     if [[ "$_RYU_CONFIGURED" == "False" ]]; then
         setup_develop $RYU_DIR
         _RYU_CONFIGURED=True
     fi
 }
 
-function init_ryu() {
+function init_ryu {
     RYU_CONF_DIR=/etc/ryu
     if [[ ! -d $RYU_CONF_DIR ]]; then
         sudo mkdir -p $RYU_CONF_DIR
@@ -60,22 +60,22 @@
 # Make this function idempotent and avoid cloning same repo many times
 # with RECLONE=yes
 _RYU_INSTALLED=${_RYU_INSTALLED:-False}
-function install_ryu() {
+function install_ryu {
     if [[ "$_RYU_INSTALLED" == "False" ]]; then
         git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
         _RYU_INSTALLED=True
     fi
 }
 
-function start_ryu() {
+function start_ryu {
     screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
 }
 
-function stop_ryu() {
+function stop_ryu {
     :
 }
 
-function check_ryu() {
+function check_ryu {
     :
 }
 
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index 2b12564..d465ac7 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -31,7 +31,7 @@
 TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf
 
 # configure_trema - Set config files, create data dirs, etc
-function configure_trema() {
+function configure_trema {
     # prepare dir
     for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do
         sudo mkdir -p $d
@@ -41,7 +41,7 @@
 }
 
 # init_trema - Initialize databases, etc.
-function init_trema() {
+function init_trema {
     local _pwd=$(pwd)
 
     # Initialize databases for Sliceable Switch
@@ -70,7 +70,7 @@
         $TREMA_SS_CONFIG
 }
 
-function gem_install() {
+function gem_install {
     [[ "$OFFLINE" = "True" ]] && return
     [ -n "$RUBYGEMS_CMD" ] || get_gem_command
 
@@ -79,7 +79,7 @@
     sudo $RUBYGEMS_CMD install $pkg
 }
 
-function get_gem_command() {
+function get_gem_command {
     # Trema requires ruby 1.8, so gem1.8 is checked first
     RUBYGEMS_CMD=$(which gem1.8 || which gem)
     if [ -z "$RUBYGEMS_CMD" ]; then
@@ -87,7 +87,7 @@
     fi
 }
 
-function install_trema() {
+function install_trema {
     # Trema
     gem_install trema
     # Sliceable Switch
@@ -97,7 +97,7 @@
     make -C $TREMA_DIR/apps/sliceable_switch
 }
 
-function start_trema() {
+function start_trema {
     # APACHE_NAME is defined in init_horizon (in lib/horizon)
     restart_service $APACHE_NAME
 
@@ -105,11 +105,11 @@
         trema run -d -c $TREMA_SS_CONFIG
 }
 
-function stop_trema() {
+function stop_trema {
     sudo TREMA_TMP=$TREMA_TMP_DIR trema killall
 }
 
-function check_trema() {
+function check_trema {
     :
 }
 
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 4eb177a..3fecc62 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -22,11 +22,11 @@
 # is invoked by unstack.sh
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
 
-function configure_vmware_nsx() {
+function configure_vmware_nsx {
     :
 }
 
-function init_vmware_nsx() {
+function init_vmware_nsx {
     if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
         NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
         echo "The IP address to set on br-ex was not specified. "
@@ -52,15 +52,15 @@
     sudo ip addr add dev $PUBLIC_BRIDGE $NSX_GATEWAY_NETWORK_CIDR
 }
 
-function install_vmware_nsx() {
+function install_vmware_nsx {
     :
 }
 
-function start_vmware_nsx() {
+function start_vmware_nsx {
     :
 }
 
-function stop_vmware_nsx() {
+function stop_vmware_nsx {
     if ! is_set NSX_GATEWAY_NETWORK_CIDR; then
         NSX_GATEWAY_NETWORK_CIDR=$PUBLIC_NETWORK_GATEWAY/${FLOATING_RANGE#*/}
         echo "The IP address expected on br-ex was not specified. "
@@ -78,7 +78,7 @@
     done
 }
 
-function check_vmware_nsx() {
+function check_vmware_nsx {
     neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini
 }
 
diff --git a/lib/nova b/lib/nova
index dbc5c3d..583a592 100644
--- a/lib/nova
+++ b/lib/nova
@@ -129,8 +129,22 @@
 # Functions
 # ---------
 
+# Test if any Nova services are enabled
+# is_nova_enabled
+function is_nova_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"n-" ]] && return 0
+    return 1
+}
+
+# Test if any Nova Cell services are enabled
+# is_n-cell_enabled
+function is_n-cell_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"n-cell-" ]] && return 0
+    return 1
+}
+
 # Helper to clean iptables rules
-function clean_iptables() {
+function clean_iptables {
     # Delete rules
     sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" |  sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
     # Delete nat rules
@@ -143,7 +157,7 @@
 
 # cleanup_nova() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_nova() {
+function cleanup_nova {
     if is_service_enabled n-cpu; then
         # Clean iptables from previous runs
         clean_iptables
@@ -177,7 +191,7 @@
 }
 
 # configure_nova_rootwrap() - configure Nova's rootwrap
-function configure_nova_rootwrap() {
+function configure_nova_rootwrap {
     # Deploy new rootwrap filters files (owned by root).
     # Wipe any existing rootwrap.d files first
     if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
@@ -205,7 +219,7 @@
 }
 
 # configure_nova() - Set config files, create data dirs, etc
-function configure_nova() {
+function configure_nova {
     # Put config files in ``/etc/nova`` for everyone to find
     if [[ ! -d $NOVA_CONF_DIR ]]; then
         sudo mkdir -p $NOVA_CONF_DIR
@@ -231,10 +245,9 @@
         inicomment $NOVA_API_PASTE_INI filter:authtoken cafile
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password
+        inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir
     fi
 
-    inicomment $NOVA_API_PASTE_INI filter:authtoken signing_dir
-
     if is_service_enabled n-cpu; then
         # Force IP forwarding on, just on case
         sudo sysctl -w net.ipv4.ip_forward=1
@@ -310,41 +323,41 @@
 # Migrated from keystone_data.sh
 create_nova_accounts() {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Nova
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-        NOVA_USER=$(keystone user-create \
-            --name=nova \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=nova@example.com \
+        NOVA_USER=$(openstack user create \
+            nova \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email nova@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add \
-            --tenant-id $SERVICE_TENANT \
-            --user-id $NOVA_USER \
-            --role-id $ADMIN_ROLE
+        openstack role add \
+            $ADMIN_ROLE \
+            --project $SERVICE_TENANT \
+            --user $NOVA_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            NOVA_SERVICE=$(keystone service-create \
-                --name=nova \
+            NOVA_SERVICE=$(openstack service create \
+                nova \
                 --type=compute \
                 --description="Nova Compute Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $NOVA_SERVICE \
                 --region RegionOne \
-                --service_id $NOVA_SERVICE \
                 --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
-            NOVA_V3_SERVICE=$(keystone service-create \
-                --name=novav3 \
+            NOVA_V3_SERVICE=$(openstack service create \
+                novav3 \
                 --type=computev3 \
                 --description="Nova Compute Service V3" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $NOVA_V3_SERVICE \
                 --region RegionOne \
-                --service_id $NOVA_V3_SERVICE \
                 --publicurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
                 --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3" \
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
@@ -353,7 +366,7 @@
 }
 
 # create_nova_conf() - Create a new nova.conf file
-function create_nova_conf() {
+function create_nova_conf {
     # Remove legacy ``nova.conf``
     rm -f $NOVA_DIR/bin/nova.conf
 
@@ -389,6 +402,10 @@
     fi
 
     if is_service_enabled n-api; then
+        if is_service_enabled n-api-meta; then
+            # If running n-api-meta as a separate service
+            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
+        fi
         iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
         if is_service_enabled tls-proxy; then
             # Set the service port for a proxy to take the original
@@ -442,7 +459,7 @@
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
         iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
         iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
-        iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier"
+        iniset $NOVA_CONF DEFAULT notification_driver "messaging"
     fi
 
     # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
@@ -495,15 +512,9 @@
     iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
     iniset_rpc_backend nova $NOVA_CONF DEFAULT
     iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
-
-    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
-        # File injection is being disabled by default in the near future -
-        # disable it here for now to avoid surprises later.
-        iniset $NOVA_CONF libvirt inject_partition '-2'
-    fi
 }
 
-function init_nova_cells() {
+function init_nova_cells {
     if is_service_enabled n-cell; then
         cp $NOVA_CONF $NOVA_CELLS_CONF
         iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB`
@@ -530,14 +541,14 @@
 }
 
 # create_nova_cache_dir() - Part of the init_nova() process
-function create_nova_cache_dir() {
+function create_nova_cache_dir {
     # Create cache dir
     sudo mkdir -p $NOVA_AUTH_CACHE_DIR
     sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR
     rm -f $NOVA_AUTH_CACHE_DIR/*
 }
 
-function create_nova_conf_nova_network() {
+function create_nova_conf_nova_network {
     iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
     iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
@@ -548,14 +559,14 @@
 }
 
 # create_nova_keys_dir() - Part of the init_nova() process
-function create_nova_keys_dir() {
+function create_nova_keys_dir {
     # Create keys dir
     sudo mkdir -p ${NOVA_STATE_PATH}/keys
     sudo chown -R $STACK_USER ${NOVA_STATE_PATH}
 }
 
 # init_nova() - Initialize databases, etc.
-function init_nova() {
+function init_nova {
     # All nova components talk to a central database.
     # Only do this step once on the API node for an entire cluster.
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
@@ -584,14 +595,14 @@
 }
 
 # install_novaclient() - Collect source and prepare
-function install_novaclient() {
+function install_novaclient {
     git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
     setup_develop $NOVACLIENT_DIR
     sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion
 }
 
 # install_nova() - Collect source and prepare
-function install_nova() {
+function install_nova {
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         install_nova_hypervisor
     fi
@@ -626,7 +637,7 @@
 }
 
 # start_nova_api() - Start the API process ahead of other things
-function start_nova_api() {
+function start_nova_api {
     # Get right service port for testing
     local service_port=$NOVA_SERVICE_PORT
     if is_service_enabled tls-proxy; then
@@ -646,7 +657,7 @@
 }
 
 # start_nova_compute() - Start the compute process
-function start_nova_compute() {
+function start_nova_compute {
     if is_service_enabled n-cell; then
         local compute_cell_conf=$NOVA_CELLS_CONF
     else
@@ -681,7 +692,7 @@
 }
 
 # start_nova() - Start running processes, including screen
-function start_nova_rest() {
+function start_nova_rest {
     local api_cell_conf=$NOVA_CONF
     if is_service_enabled n-cell; then
         local compute_cell_conf=$NOVA_CELLS_CONF
@@ -710,13 +721,13 @@
         screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
 }
 
-function start_nova() {
+function start_nova {
     start_nova_compute
     start_nova_rest
 }
 
 # stop_nova() - Stop running processes (non-screen)
-function stop_nova() {
+function stop_nova {
     # Kill the nova screen windows
     # Some services are listed here twice since more than one instance
     # of a service may be running in certain configs.
diff --git a/lib/nova_plugins/hypervisor-baremetal b/lib/nova_plugins/hypervisor-baremetal
index 660c977..2da1097 100644
--- a/lib/nova_plugins/hypervisor-baremetal
+++ b/lib/nova_plugins/hypervisor-baremetal
@@ -33,13 +33,13 @@
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     configure_baremetal_nova_dirs
 
     iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm`
@@ -67,19 +67,19 @@
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # This function intentionally left blank
     :
 }
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
index bb934b8..cdbc4d1 100644
--- a/lib/nova_plugins/hypervisor-docker
+++ b/lib/nova_plugins/hypervisor-docker
@@ -31,8 +31,8 @@
 DOCKER_PID_FILE=/var/run/docker.pid
 DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042}
 
-DOCKER_IMAGE=${DOCKER_IMAGE:-busybox:latest}
-DOCKER_IMAGE_NAME=busybox
+DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest}
+DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME
 DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest}
 DOCKER_REGISTRY_IMAGE_NAME=registry
 DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
@@ -44,7 +44,7 @@
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     stop_service docker
 
     # Clean out work area
@@ -52,13 +52,13 @@
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver
     iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     # So far this is Ubuntu only
     if ! is_ubuntu; then
         die $LINENO "Docker is only supported on Ubuntu at this time"
@@ -77,7 +77,7 @@
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     local docker_pid
     read docker_pid <$DOCKER_PID_FILE
     if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
@@ -104,14 +104,13 @@
     fi
 
     # Make sure we copied the image in Glance
-    DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ")
-    if ! is_set DOCKER_IMAGE ; then
+    if ! (glance image-show "$DOCKER_IMAGE"); then
         docker push $DOCKER_REPOSITORY_NAME
     fi
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # Stop the docker registry container
     docker kill $(docker ps | grep docker-registry | cut -d' ' -f1)
 }
diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index fe0d190..e7a833f 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -27,13 +27,13 @@
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver"
     # Disable arbitrary limits
     iniset $NOVA_CONF DEFAULT quota_instances -1
@@ -51,19 +51,19 @@
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # This function intentionally left blank
     :
 }
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 42d3af1..bbf6554 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -23,18 +23,21 @@
 # Defaults
 # --------
 
+# File injection is disabled by default in Nova.  This will turn it back on.
+ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False}
+
 
 # Entry Points
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
         # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
         cat <<EOF | sudo tee -a $QEMU_CONF
@@ -55,40 +58,40 @@
 
     if is_fedora || is_suse; then
         if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
-            sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+            cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
 [libvirt Management Access]
 Identity=unix-group:$LIBVIRT_GROUP
 Action=org.libvirt.unix.manage
 ResultAny=yes
 ResultInactive=yes
 ResultActive=yes
-EOF"
+EOF
         elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
             # openSUSE < 12.3 or SLE
             # Work around the fact that polkit-default-privs overrules pklas
             # with 'unix-group:$group'.
-            sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+            cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
 [libvirt Management Access]
 Identity=unix-user:$STACK_USER
 Action=org.libvirt.unix.manage
 ResultAny=yes
 ResultInactive=yes
 ResultActive=yes
-EOF"
+EOF
         else
             # Starting with fedora 18 and opensuse-12.3 enable stack-user to
             # virsh -c qemu:///system by creating a policy-kit rule for
             # stack-user using the new Javascript syntax
             rules_dir=/etc/polkit-1/rules.d
             sudo mkdir -p $rules_dir
-            sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
+            cat <<EOF | sudo tee $rules_dir/50-libvirt-$STACK_USER.rules
 polkit.addRule(function(action, subject) {
     if (action.id == 'org.libvirt.unix.manage' &&
-        subject.user == '"$STACK_USER"') {
+        subject.user == '$STACK_USER') {
         return polkit.Result.YES;
     }
 });
-EOF"
+EOF
             unset rules_dir
         fi
     fi
@@ -116,18 +119,33 @@
     if is_arch "ppc64"; then
         iniset $NOVA_CONF DEFAULT vnc_enabled "false"
     fi
+
+    ENABLE_FILE_INJECTION=$(trueorfalse False $ENABLE_FILE_INJECTION)
+    if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then
+        # When libguestfs is available for file injection, enable using
+        # libguestfs to inspect the image and figure out the proper
+        # partition to inject into.
+        iniset $NOVA_CONF libvirt inject_partition '-1'
+        iniset $NOVA_CONF libvirt inject_key 'true'
+    else
+        # File injection is being disabled by default in the near future -
+        # disable it here for now to avoid surprises later.
+        iniset $NOVA_CONF libvirt inject_partition '-2'
+    fi
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     if is_ubuntu; then
         install_package kvm
         install_package libvirt-bin
         install_package python-libvirt
+        install_package python-guestfs
     elif is_fedora || is_suse; then
         install_package kvm
         install_package libvirt
         install_package libvirt-python
+        install_package python-libguestfs
     fi
 
     # Install and configure **LXC** if specified.  LXC is another approach to
@@ -147,13 +165,13 @@
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # This function intentionally left blank
     :
 }
diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz
index fc5ed0c..a1636ad 100644
--- a/lib/nova_plugins/hypervisor-openvz
+++ b/lib/nova_plugins/hypervisor-openvz
@@ -27,13 +27,13 @@
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
     iniset $NOVA_CONF DEFAULT connection_type "openvz"
     LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
@@ -41,19 +41,19 @@
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # This function intentionally left blank
     :
 }
diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere
index 1666246..b04aeda 100644
--- a/lib/nova_plugins/hypervisor-vsphere
+++ b/lib/nova_plugins/hypervisor-vsphere
@@ -27,13 +27,13 @@
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver"
     VMWAREAPI_USER=${VMWAREAPI_USER:-"root"}
     iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP"
@@ -46,19 +46,19 @@
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # This function intentionally left blank
     :
 }
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index f47994f..10bda2c 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -37,13 +37,13 @@
 # ------------
 
 # clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
+function cleanup_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
+function configure_nova_hypervisor {
     if [ -z "$XENAPI_CONNECTION_URL" ]; then
         die $LINENO "XENAPI_CONNECTION_URL is not specified"
     fi
@@ -56,22 +56,50 @@
     # Need to avoid crash due to new firewall support
     XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
     iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
+
+    local dom0_ip
+    dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
+
+    local ssh_dom0
+    ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip"
+
+    # install nova plugins to dom0
+    tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ |
+        $ssh_dom0 'tar -xzf - -C /etc/xapi.d/plugins/ && chmod a+x /etc/xapi.d/plugins/*'
+
+    # install console logrotate script
+    tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh |
+        $ssh_dom0 'tar -xzf - -C /root/ && chmod +x /root/rotate_xen_guest_logs.sh && mkdir -p /var/log/xen/guest'
+
+    # Create a cron job that will rotate guest logs
+    $ssh_dom0 crontab - << CRONTAB
+* * * * * /root/rotate_xen_guest_logs.sh
+CRONTAB
+
+    # Create directories for kernels and images
+    {
+        echo "set -eux"
+        cat $TOP_DIR/tools/xen/functions
+        echo "create_directory_for_images"
+        echo "create_directory_for_kernels"
+    } | $ssh_dom0
+
 }
 
 # install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
+function install_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
+function start_nova_hypervisor {
     # This function intentionally left blank
     :
 }
 
 # stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
+function stop_nova_hypervisor {
     # This function intentionally left blank
     :
 }
diff --git a/lib/oslo b/lib/oslo
index f644ed7..8ef179c 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -20,19 +20,27 @@
 
 # Defaults
 # --------
+CLIFF_DIR=$DEST/cliff
 OSLOCFG_DIR=$DEST/oslo.config
 OSLOMSG_DIR=$DEST/oslo.messaging
 OSLORWRAP_DIR=$DEST/oslo.rootwrap
+OSLOVMWARE_DIR=$DEST/oslo.vmware
+PYCADF_DIR=$DEST/pycadf
+STEVEDORE_DIR=$DEST/stevedore
+TASKFLOW_DIR=$DEST/taskflow
 
 # Entry Points
 # ------------
 
 # install_oslo() - Collect source and prepare
-function install_oslo() {
+function install_oslo {
     # TODO(sdague): remove this once we get to Icehouse, this just makes
     # for a smoother transition of existing users.
     cleanup_oslo
 
+    git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH
+    setup_develop $CLIFF_DIR
+
     git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
     setup_develop $OSLOCFG_DIR
 
@@ -41,10 +49,22 @@
 
     git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH
     setup_develop $OSLORWRAP_DIR
+
+    git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH
+    setup_develop $OSLOVMWARE_DIR
+
+    git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH
+    setup_develop $PYCADF_DIR
+
+    git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH
+    setup_develop $STEVEDORE_DIR
+
+    git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH
+    setup_develop $TASKFLOW_DIR
 }
 
 # cleanup_oslo() - purge possibly old versions of oslo
-function cleanup_oslo() {
+function cleanup_oslo {
     # this means we've got an old oslo installed, lets get rid of it
     if ! python -c 'import oslo.config' 2>/dev/null; then
         echo "Found old oslo.config... removing to ensure consistency"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 3651bc0..a0424b1 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -25,7 +25,7 @@
 
 # Make sure we only have one rpc backend enabled.
 # Also check the specified rpc backend is available on your platform.
-function check_rpc_backend() {
+function check_rpc_backend {
     local rpc_needed=1
     # We rely on the fact that filenames in lib/* match the service names
     # that can be passed as arguments to is_service_enabled.
@@ -67,7 +67,7 @@
         sudo killall epmd || sudo killall -9 epmd
         if is_ubuntu; then
             # And the Erlang runtime too
-            sudo aptitude purge -y ~nerlang
+            apt_get purge -y erlang*
         fi
     elif is_service_enabled qpid; then
         if is_fedora; then
@@ -91,7 +91,7 @@
 }
 
 # install rpc backend
-function install_rpc_backend() {
+function install_rpc_backend {
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
         # the temp file is necessary due to LP: #878600
@@ -135,7 +135,7 @@
 }
 
 # restart the rpc backend
-function restart_rpc_backend() {
+function restart_rpc_backend {
     if is_service_enabled rabbit; then
         # Start rabbitmq-server
         echo_summary "Starting RabbitMQ"
@@ -165,7 +165,7 @@
 }
 
 # iniset cofiguration
-function iniset_rpc_backend() {
+function iniset_rpc_backend {
     local package=$1
     local file=$2
     local section=$3
@@ -193,7 +193,7 @@
 
 # Check if qpid can be used on the current distro.
 # qpid_is_supported
-function qpid_is_supported() {
+function qpid_is_supported {
     if [[ -z "$DISTRO" ]]; then
         GetDistro
     fi
diff --git a/lib/savanna b/lib/savanna
index 6f42311..2cb092c 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -10,6 +10,7 @@
 # configure_savanna
 # start_savanna
 # stop_savanna
+# cleanup_savanna
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -33,6 +34,8 @@
 SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
 SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 
+SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
+
 # Support entry points installation of console scripts
 if [[ -d $SAVANNA_DIR/bin ]]; then
     SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
@@ -52,39 +55,47 @@
 # Tenant      User       Roles
 # ------------------------------
 # service     savanna    admin
-function create_savanna_accounts() {
+function create_savanna_accounts {
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SAVANNA_USER=$(keystone user-create \
-        --name=savanna \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant-id $SERVICE_TENANT \
-        --email=savanna@example.com \
+    SAVANNA_USER=$(openstack user create \
+        savanna \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email savanna@example.com \
         | grep " id " | get_field 2)
-    keystone user-role-add \
-        --tenant-id $SERVICE_TENANT \
-        --user-id $SAVANNA_USER \
-        --role-id $ADMIN_ROLE
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SAVANNA_USER
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SAVANNA_SERVICE=$(keystone service-create \
-            --name=savanna \
+        SAVANNA_SERVICE=$(openstack service create \
+            savanna \
             --type=data_processing \
             --description="Savanna Data Processing" \
             | grep " id " | get_field 2)
-        keystone endpoint-create \
+        openstack endpoint create \
+            $SAVANNA_SERVICE \
             --region RegionOne \
-            --service_id $SAVANNA_SERVICE \
             --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
             --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
             --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
     fi
 }
 
+# cleanup_savanna() - Remove residual data files, anything left over from
+# previous runs that a clean run would need to clean up.
+function cleanup_savanna {
+
+    # Cleanup auth cache dir
+    sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
+}
+
 # configure_savanna() - Set config files, create data dirs, etc
-function configure_savanna() {
+function configure_savanna {
 
     if [[ ! -d $SAVANNA_CONF_DIR ]]; then
         sudo mkdir -p $SAVANNA_CONF_DIR
@@ -94,9 +105,27 @@
     # Copy over savanna configuration file and configure common parameters.
     cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
 
+    # Create auth cache dir
+    sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
+    rm -rf $SAVANNA_AUTH_CACHE_DIR/*
+
+    # Set obsolete keystone auth configs for backward compatibility
+    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
     iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
     iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
     iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+    # Set actual keystone auth configs
+    iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
+    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
+    iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
     iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
 
     iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
@@ -106,6 +135,12 @@
         iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true
     fi
 
+    if is_service_enabled heat; then
+        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat
+    else
+        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna
+    fi
+
     iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
 
     recreate_database savanna utf8
@@ -113,18 +148,18 @@
 }
 
 # install_savanna() - Collect source and prepare
-function install_savanna() {
+function install_savanna {
     git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH
     setup_develop $SAVANNA_DIR
 }
 
 # start_savanna() - Start running processes, including screen
-function start_savanna() {
+function start_savanna {
     screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE"
 }
 
 # stop_savanna() - Stop running processes
-function stop_savanna() {
+function stop_savanna {
     # Kill the Savanna screen windows
     screen -S $SCREEN_NAME -p savanna -X kill
 }
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
index 7713a78..6fe15a3 100644
--- a/lib/savanna-dashboard
+++ b/lib/savanna-dashboard
@@ -35,10 +35,11 @@
 # Functions
 # ---------
 
-function configure_savanna_dashboard() {
+function configure_savanna_dashboard {
 
-    echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
 
     if is_service_enabled neutron; then
         echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
@@ -46,19 +47,19 @@
 }
 
 # install_savanna_dashboard() - Collect source and prepare
-function install_savanna_dashboard() {
+function install_savanna_dashboard {
     install_python_savannaclient
     git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH
     setup_develop $SAVANNA_DASHBOARD_DIR
 }
 
-function install_python_savannaclient() {
+function install_python_savannaclient {
     git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH
     setup_develop $SAVANNA_PYTHONCLIENT_DIR
 }
 
 # Cleanup file settings.py from Savanna
-function cleanup_savanna_dashboard() {
+function cleanup_savanna_dashboard {
     sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py
 }
 
diff --git a/lib/stackforge b/lib/stackforge
index 718b818..dca08cc 100644
--- a/lib/stackforge
+++ b/lib/stackforge
@@ -6,8 +6,9 @@
 # This is appropriate for python libraries that release to pypi and are
 # expected to be used beyond OpenStack like, but are requirements
 # for core services in global-requirements.
-#    * wsme
-#    * pecan
+#
+#     * wsme
+#     * pecan
 #
 # This is not appropriate for stackforge projects which are early stage
 # OpenStack tools
@@ -33,7 +34,7 @@
 # ------------
 
 # install_stackforge() - Collect source and prepare
-function install_stackforge() {
+function install_stackforge {
     # TODO(sdague): remove this once we get to Icehouse, this just makes
     # for a smoother transition of existing users.
     cleanup_stackforge
@@ -46,7 +47,7 @@
 }
 
 # cleanup_stackforge() - purge possibly old versions of stackforge libraries
-function cleanup_stackforge() {
+function cleanup_stackforge {
     # this means we've got an old version installed, lets get rid of it
     # otherwise python hates itself
     for lib in wsme pecan; do
diff --git a/lib/swift b/lib/swift
index 54d6f1c..5d4d4ef 100644
--- a/lib/swift
+++ b/lib/swift
@@ -118,8 +118,15 @@
 # Functions
 # ---------
 
+# Test if any Swift services are enabled
+# is_swift_enabled
+function is_swift_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"s-" ]] && return 0
+    return 1
+}
+
 # cleanup_swift() - Remove residual data files
-function cleanup_swift() {
+function cleanup_swift {
     rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
     if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
         sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
@@ -134,7 +141,7 @@
 }
 
 # _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_swift_apache_wsgi() {
+function _cleanup_swift_apache_wsgi {
     sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi
     disable_apache_site proxy-server
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
@@ -147,7 +154,7 @@
 }
 
 # _config_swift_apache_wsgi() - Set WSGI config files of Swift
-function _config_swift_apache_wsgi() {
+function _config_swift_apache_wsgi {
     sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR}
     local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR
     local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
@@ -224,8 +231,48 @@
     done
 }
 
+# This function generates an object/container/account configuration
+# emulating 4 nodes on different ports
+function generate_swift_config {
+    local swift_node_config=$1
+    local node_id=$2
+    local bind_port=$3
+    local server_type=$4
+
+    log_facility=$[ node_id - 1 ]
+    node_path=${SWIFT_DATA_DIR}/${node_number}
+
+    iniuncomment ${swift_node_config} DEFAULT user
+    iniset ${swift_node_config} DEFAULT user ${STACK_USER}
+
+    iniuncomment ${swift_node_config} DEFAULT bind_port
+    iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
+
+    iniuncomment ${swift_node_config} DEFAULT swift_dir
+    iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}
+
+    iniuncomment ${swift_node_config} DEFAULT devices
+    iniset ${swift_node_config} DEFAULT devices ${node_path}
+
+    iniuncomment ${swift_node_config} DEFAULT log_facility
+    iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+
+    iniuncomment ${swift_node_config} DEFAULT workers
+    iniset ${swift_node_config} DEFAULT workers 1
+
+    iniuncomment ${swift_node_config} DEFAULT disable_fallocate
+    iniset ${swift_node_config} DEFAULT disable_fallocate true
+
+    iniuncomment ${swift_node_config} DEFAULT mount_check
+    iniset ${swift_node_config} DEFAULT mount_check false
+
+    iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
+    iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
+}
+
+
 # configure_swift() - Set config files, create data dirs and loop image
-function configure_swift() {
+function configure_swift {
     local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}"
     local node_number
     local swift_node_config
@@ -254,7 +301,7 @@
     # rsyncd.conf just prepared for 4 nodes
     if is_ubuntu; then
         sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
-    else
+    elif [ -e /etc/xinetd.d/rsync ]; then
         sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
     fi
 
@@ -341,7 +388,7 @@
 # NOTE(chmou): s3token middleware is not updated yet to use only
 # username and password.
 [filter:s3token]
-paste.filter_factory = keystone.middleware.s3_token:filter_factory
+paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory
 auth_port = ${KEYSTONE_AUTH_PORT}
 auth_host = ${KEYSTONE_AUTH_HOST}
 auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
@@ -357,45 +404,6 @@
     cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
 
-    # This function generates an object/container/account configuration
-    # emulating 4 nodes on different ports
-    function generate_swift_config() {
-        local swift_node_config=$1
-        local node_id=$2
-        local bind_port=$3
-        local server_type=$4
-
-        log_facility=$[ node_id - 1 ]
-        node_path=${SWIFT_DATA_DIR}/${node_number}
-
-        iniuncomment ${swift_node_config} DEFAULT user
-        iniset ${swift_node_config} DEFAULT user ${STACK_USER}
-
-        iniuncomment ${swift_node_config} DEFAULT bind_port
-        iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
-
-        iniuncomment ${swift_node_config} DEFAULT swift_dir
-        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}
-
-        iniuncomment ${swift_node_config} DEFAULT devices
-        iniset ${swift_node_config} DEFAULT devices ${node_path}
-
-        iniuncomment ${swift_node_config} DEFAULT log_facility
-        iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
-
-        iniuncomment ${swift_node_config} DEFAULT workers
-        iniset ${swift_node_config} DEFAULT workers 1
-
-        iniuncomment ${swift_node_config} DEFAULT disable_fallocate
-        iniset ${swift_node_config} DEFAULT disable_fallocate true
-
-        iniuncomment ${swift_node_config} DEFAULT mount_check
-        iniset ${swift_node_config} DEFAULT mount_check false
-
-        iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
-        iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
-    }
-
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
         swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
@@ -452,7 +460,7 @@
 }
 
 # create_swift_disk - Create Swift backing disk
-function create_swift_disk() {
+function create_swift_disk {
     local node_number
 
     # First do a bit of setup by creating the directories and
@@ -512,46 +520,65 @@
 # swifttenanttest1   swiftusertest3     anotherrole
 # swifttenanttest2   swiftusertest2     admin
 
-function create_swift_accounts() {
+function create_swift_accounts {
+    # Defines specific passwords used by tools/create_userrc.sh
+    SWIFTUSERTEST1_PASSWORD=testing
+    SWIFTUSERTEST2_PASSWORD=testing2
+    SWIFTUSERTEST3_PASSWORD=testing3
+
     KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \
-        --tenant-id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2)
-    keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE
+    SWIFT_USER=$(openstack user create \
+        swift \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email=swift@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SWIFT_USER
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \
-            --description="Swift Service" | grep " id " | get_field 2)
-        keystone endpoint-create \
+        SWIFT_SERVICE=$(openstack service create \
+            swift \
+            --type="object-store" \
+            --description="Swift Service" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+            $SWIFT_SERVICE \
             --region RegionOne \
-            --service_id $SWIFT_SERVICE \
             --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
             --adminurl "http://$SERVICE_HOST:8080" \
             --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 
-    SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2)
+    SWIFT_TENANT_TEST1=$(openstack project create swifttenanttest1 | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
-    SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2)
+    SWIFT_USER_TEST1=$(openstack user create swiftusertest1 --password=$SWIFTUSERTEST1_PASSWORD \
+        --project "$SWIFT_TENANT_TEST1" --email=test@example.com | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
-    keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1
+    openstack role add --user $SWIFT_USER_TEST1 --project $SWIFT_TENANT_TEST1 $ADMIN_ROLE
 
-    SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2)
+    SWIFT_USER_TEST3=$(openstack user create swiftusertest3 --password=$SWIFTUSERTEST3_PASSWORD \
+        --project "$SWIFT_TENANT_TEST1" --email=test3@example.com | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
-    keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1
+    openstack role add --user $SWIFT_USER_TEST3 --project $SWIFT_TENANT_TEST1 $ANOTHER_ROLE
 
-    SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2)
+    SWIFT_TENANT_TEST2=$(openstack project create swifttenanttest2 | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
-    SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2)
+
+    SWIFT_USER_TEST2=$(openstack user create swiftusertest2 --password=$SWIFTUSERTEST2_PASSWORD \
+        --project "$SWIFT_TENANT_TEST2" --email=test2@example.com | grep " id " | get_field 2)
     die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
-    keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2
+    openstack role add --user $SWIFT_USER_TEST2 --project $SWIFT_TENANT_TEST2 $ADMIN_ROLE
 }
 
 # init_swift() - Initialize rings
-function init_swift() {
+function init_swift {
     local node_number
     # Make sure to kill all swift processes first
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
@@ -585,7 +612,7 @@
     rm -f $SWIFT_AUTH_CACHE_DIR/*
 }
 
-function install_swift() {
+function install_swift {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
     setup_develop $SWIFT_DIR
     if is_apache_enabled_service swift; then
@@ -593,13 +620,13 @@
     fi
 }
 
-function install_swiftclient() {
+function install_swiftclient {
     git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
     setup_develop $SWIFTCLIENT_DIR
 }
 
 # start_swift() - Start running processes, including screen
-function start_swift() {
+function start_swift {
     # (re)start rsyslog
     restart_service rsyslog
     # (re)start memcached to make sure we have a clean memcache.
@@ -608,8 +635,10 @@
     # Start rsync
     if is_ubuntu; then
         sudo /etc/init.d/rsync restart || :
+    elif [ -e /etc/xinetd.d/rsync ]; then
+        start_service xinetd
     else
-        sudo systemctl start xinetd.service
+        start_service rsyncd
     fi
 
     if is_apache_enabled_service swift; then
@@ -647,7 +676,7 @@
 }
 
 # stop_swift() - Stop running processes (non-screen)
-function stop_swift() {
+function stop_swift {
 
     if is_apache_enabled_service swift; then
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
@@ -657,10 +686,8 @@
     if type -p swift-init >/dev/null; then
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
     fi
-    for type in proxy object container account; do
-        # Dump all of the servers
-        pkill -f swift-
-    done
+    # Dump all of the servers
+    pkill -f swift-
 }
 
 # Restore xtrace
diff --git a/lib/tempest b/lib/tempest
index 06183b1..16f8744 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -63,11 +63,14 @@
 TEMPEST_VOLUME_VENDOR=${TEMPEST_VOLUME_VENDOR:-"Open Source"}
 TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-iSCSI}
 
+# Neutron/Network variables
+IPV6_ENABLED=$(trueorfalse True $IPV6_ENABLED)
+
 # Functions
 # ---------
 
 # configure_tempest() - Set config files, create data dirs, etc
-function configure_tempest() {
+function configure_tempest {
     setup_develop $TEMPEST_DIR
     local image_lines
     local images
@@ -87,11 +90,6 @@
     local boto_instance_type="m1.tiny"
     local ssh_connect_method="fixed"
 
-    if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then
-        sudo mkdir -p $TEMPEST_CONFIG_DIR
-    fi
-    sudo chown $STACK_USER $TEMPEST_CONFIG_DIR
-
     # TODO(afazekas):
     # sudo python setup.py deploy
 
@@ -142,8 +140,12 @@
 
     # Create tempest.conf from tempest.conf.sample
     # copy every time, because the image UUIDS are going to change
-    sudo cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
-    sudo chmod 644 $TEMPEST_CONFIG
+    if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then
+        sudo mkdir -p $TEMPEST_CONFIG_DIR
+    fi
+    sudo chown $STACK_USER $TEMPEST_CONFIG_DIR
+    cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG
+    chmod 644 $TEMPEST_CONFIG
 
     password=${ADMIN_PASSWORD:-secrete}
 
@@ -251,6 +253,7 @@
 
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
+    iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v3/"
     iniset $TEMPEST_CONFIG identity password "$password"
     iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
     iniset $TEMPEST_CONFIG identity alt_password "$password"
@@ -266,11 +269,6 @@
 
     # Compute
     iniset $TEMPEST_CONFIG compute change_password_available False
-    # Note(nati) current tempest don't create network for each tenant
-    # so reuse same tenant for now
-    if is_service_enabled neutron; then
-        TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
-    fi
     iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME
@@ -289,11 +287,13 @@
     # Compute admin
     iniset $TEMPEST_CONFIG "compute-admin" password "$password" # DEPRECATED
 
+    # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
     iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable"
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
+    iniset $TEMPEST_CONFIG network ipv6_enabled "$IPV6_ENABLED"
 
     # boto
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
@@ -318,12 +318,12 @@
     iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
 
     # Volume
-    if is_service_enabled c-bak; then
-        iniset $TEMPEST_CONFIG volume volume_backup_enabled "True"
+    if ! is_service_enabled c-bak; then
+        iniset $TEMPEST_CONFIG volume-feature-enabled backup False
     fi
     CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
     if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then
-        iniset $TEMPEST_CONFIG volume multi_backend_enabled "True"
+        iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True"
         iniset $TEMPEST_CONFIG volume backend1_name "LVM_iSCSI"
         iniset $TEMPEST_CONFIG volume backend2_name "LVM_iSCSI_2"
     fi
@@ -352,9 +352,6 @@
         fi
     done
 
-    echo "Created tempest configuration file:"
-    cat $TEMPEST_CONFIG
-
     # Restore IFS
     IFS=$ifs
     #Restore errexit
@@ -362,12 +359,12 @@
 }
 
 # install_tempest() - Collect source and prepare
-function install_tempest() {
+function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
 }
 
 # init_tempest() - Initialize ec2 images
-function init_tempest() {
+function init_tempest {
     local base_image_name=cirros-0.3.1-x86_64
     # /opt/stack/devstack/files/images/cirros-0.3.1-x86_64-uec
     local image_dir="$FILES/images/${base_image_name}-uec"
diff --git a/lib/template b/lib/template
index 629e110..efe5826 100644
--- a/lib/template
+++ b/lib/template
@@ -10,6 +10,7 @@
 
 # ``stack.sh`` calls the entry points in this order:
 #
+# - is_XXXX_enabled
 # - install_XXXX
 # - configure_XXXX
 # - init_XXXX
@@ -35,9 +36,16 @@
 # Entry Points
 # ------------
 
+# Test if any XXXX services are enabled
+# is_XXXX_enabled
+function is_XXXX_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"XX-" ]] && return 0
+    return 1
+}
+
 # cleanup_XXXX() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_XXXX() {
+function cleanup_XXXX {
     # kill instances (nova)
     # delete image files (glance)
     # This function intentionally left blank
@@ -45,7 +53,7 @@
 }
 
 # configure_XXXX() - Set config files, create data dirs, etc
-function configure_XXXX() {
+function configure_XXXX {
     # sudo python setup.py deploy
     # iniset $XXXX_CONF ...
     # This function intentionally left blank
@@ -53,26 +61,26 @@
 }
 
 # init_XXXX() - Initialize databases, etc.
-function init_XXXX() {
+function init_XXXX {
     # clean up from previous (possibly aborted) runs
     # create required data files
     :
 }
 
 # install_XXXX() - Collect source and prepare
-function install_XXXX() {
+function install_XXXX {
     # git clone xxx
     :
 }
 
 # start_XXXX() - Start running processes, including screen
-function start_XXXX() {
+function start_XXXX {
     # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
     :
 }
 
 # stop_XXXX() - Stop running processes (non-screen)
-function stop_XXXX() {
+function stop_XXXX {
     # FIXME(dtroyer): stop only our screen screen window?
     :
 }
diff --git a/lib/tls b/lib/tls
index 6134fa1..072059d 100644
--- a/lib/tls
+++ b/lib/tls
@@ -61,7 +61,7 @@
 OPENSSL=${OPENSSL:-/usr/bin/openssl}
 
 # Do primary CA configuration
-function configure_CA() {
+function configure_CA {
     # build common config file
 
     # Verify ``TLS_IP`` is good
@@ -73,7 +73,7 @@
 
 # Creates a new CA directory structure
 # create_CA_base ca-dir
-function create_CA_base() {
+function create_CA_base {
     local ca_dir=$1
 
     if [[ -d $ca_dir ]]; then
@@ -92,7 +92,7 @@
 
 # Create a new CA configuration file
 # create_CA_config ca-dir common-name
-function create_CA_config() {
+function create_CA_config {
     local ca_dir=$1
     local common_name=$2
 
@@ -145,7 +145,7 @@
 
 # Create a new signing configuration file
 # create_signing_config ca-dir
-function create_signing_config() {
+function create_signing_config {
     local ca_dir=$1
 
     echo "
@@ -225,7 +225,7 @@
 
 # make_cert creates and signs a new certificate with the given commonName and CA
 # make_cert ca-dir cert-name "common-name" ["alt-name" ...]
-function make_cert() {
+function make_cert {
     local ca_dir=$1
     local cert_name=$2
     local common_name=$3
@@ -261,7 +261,7 @@
 
 # Make an intermediate CA to sign everything else
 # make_int_CA ca-dir signing-ca-dir
-function make_int_CA() {
+function make_int_CA {
     local ca_dir=$1
     local signing_ca_dir=$2
 
@@ -291,7 +291,7 @@
 
 # Make a root CA to sign other CAs
 # make_root_CA ca-dir
-function make_root_CA() {
+function make_root_CA {
     local ca_dir=$1
 
     # Create the root CA
@@ -319,7 +319,7 @@
 # is a short-circuit boolean, i.e it returns on the first match.
 #
 # Uses global ``SSL_ENABLED_SERVICES``
-function is_ssl_enabled_service() {
+function is_ssl_enabled_service {
     services=$@
     for service in ${services}; do
         [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
@@ -337,7 +337,7 @@
 # example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
 # KEYSTONE_SSL_CA. If it does not find these certificates the program will
 # quit.
-function ensure_certificates() {
+function ensure_certificates {
     local service=$1
 
     local cert_var="${service}_SSL_CERT"
@@ -362,7 +362,7 @@
 
 # Starts the TLS proxy for the given IP/ports
 # start_tls_proxy front-host front-port back-host back-port
-function start_tls_proxy() {
+function start_tls_proxy {
     local f_host=$1
     local f_port=$2
     local b_host=$3
diff --git a/lib/trove b/lib/trove
index 2000446..75b990f 100644
--- a/lib/trove
+++ b/lib/trove
@@ -53,7 +53,7 @@
 }
 
 # setup_trove_logging() - Adds logging configuration to conf files
-function setup_trove_logging() {
+function setup_trove_logging {
     local CONF=$1
     iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $CONF DEFAULT use_syslog $SYSLOG
@@ -69,30 +69,31 @@
 # ------------------------------------------------------------------
 # service              trove     admin        # if enabled
 
-create_trove_accounts() {
+function create_trove_accounts {
     # Trove
-    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
-        TROVE_USER=$(keystone user-create \
-            --name=trove \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant-id $SERVICE_TENANT \
-            --email=trove@example.com \
+        TROVE_USER=$(openstack user create \
+            trove \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT \
+            --email trove@example.com \
             | grep " id " | get_field 2)
-        keystone user-role-add --tenant-id $SERVICE_TENANT \
-            --user-id $TROVE_USER \
-            --role-id $SERVICE_ROLE
+        openstack role add \
+            $SERVICE_ROLE \
+            --project $SERVICE_TENANT \
+            --user $TROVE_USER
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            TROVE_SERVICE=$(keystone service-create \
-                --name=trove \
+            TROVE_SERVICE=$(openstack service create \
+                trove \
                 --type=database \
                 --description="Trove Service" \
                 | grep " id " | get_field 2)
-            keystone endpoint-create \
+            openstack endpoint create \
+                $TROVE_SERVICE \
                 --region RegionOne \
-                --service_id $TROVE_SERVICE \
                 --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
                 --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
                 --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s"
@@ -105,19 +106,19 @@
 
 # cleanup_trove() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function cleanup_trove() {
+function cleanup_trove {
     #Clean up dirs
     rm -fr $TROVE_AUTH_CACHE_DIR/*
     rm -fr $TROVE_CONF_DIR/*
 }
 
 # configure_troveclient() - Set config files, create data dirs, etc
-function configure_troveclient() {
+function configure_troveclient {
     setup_develop $TROVECLIENT_DIR
 }
 
 # configure_trove() - Set config files, create data dirs, etc
-function configure_trove() {
+function configure_trove {
     setup_develop $TROVE_DIR
 
     # Create the trove conf dir and cache dirs if they don't exist
@@ -148,8 +149,6 @@
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True
 
     iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD
-    iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove`
-    iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT control_exchange trove
     sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
 
     setup_trove_logging $TROVE_CONF_DIR/trove.conf
@@ -183,17 +182,17 @@
 }
 
 # install_troveclient() - Collect source and prepare
-function install_troveclient() {
+function install_troveclient {
     git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH
 }
 
 # install_trove() - Collect source and prepare
-function install_trove() {
+function install_trove {
     git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH
 }
 
 # init_trove() - Initializes Trove Database as a Service
-function init_trove() {
+function init_trove {
     #(Re)Create trove db
     recreate_database trove utf8
 
@@ -202,14 +201,14 @@
 }
 
 # start_trove() - Start running processes, including screen
-function start_trove() {
+function start_trove {
     screen_it tr-api "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1"
     screen_it tr-tmgr "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1"
     screen_it tr-cond "cd $TROVE_DIR; $TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONF_DIR/trove-conductor.conf --debug 2>&1"
 }
 
 # stop_trove() - Stop running processes
-function stop_trove() {
+function stop_trove {
     # Kill the trove screen windows
     for serv in tr-api tr-tmgr tr-cond; do
         screen_stop $serv
diff --git a/openrc b/openrc
index 784b00e..fc066ad 100644
--- a/openrc
+++ b/openrc
@@ -67,7 +67,7 @@
 # Identity API version
 export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
 
-# Authenticating against an Openstack cloud using Keystone returns a **Token**
+# Authenticating against an OpenStack cloud using Keystone returns a **Token**
 # and **Service Catalog**.  The catalog contains the endpoints for all services
 # the user/tenant has access to - including nova, glance, keystone, swift, ...
 # We currently recommend using the 2.0 *identity api*.
diff --git a/run_tests.sh b/run_tests.sh
index 9d9d186..a0bfbee 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -20,10 +20,10 @@
 else
     LIBS=`find lib -type f | grep -v \.md`
     SCRIPTS=`find . -type f -name \*\.sh`
-    EXTRA="functions"
+    EXTRA="functions functions-common stackrc openrc exerciserc eucarc"
     FILES="$SCRIPTS $LIBS $EXTRA"
 fi
 
 echo "Running bash8..."
 
-./tools/bash8.py $FILES
+./tools/bash8.py -v $FILES
diff --git a/stack.sh b/stack.sh
index 45d47c8..ccd567e 100755
--- a/stack.sh
+++ b/stack.sh
@@ -5,11 +5,12 @@
 # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**,
 # and **Swift**
 
-# This script allows you to specify configuration options of what git
-# repositories to use, enabled services, network configuration and various
-# passwords.  If you are crafty you can run the script on multiple nodes using
-# shared settings for common resources (mysql, rabbitmq) and build a multi-node
-# developer install.
+# This script's options can be changed by setting appropriate environment
+# variables.  You can configure things like which git repositories to use,
+# services to enable, OS images to use, etc.  Default values are located in the
+# ``stackrc`` file. If you are crafty you can run the script on multiple nodes
+# using shared settings for common resources (eg., mysql or rabbitmq) and build
+# a multi-node developer install.
 
 # To keep this script simple we assume you are running on a recent **Ubuntu**
 # (12.04 Precise or newer) or **Fedora** (F18 or newer) machine.  (It may work
@@ -30,6 +31,9 @@
 LC_ALL=C
 export LC_ALL
 
+# Make sure umask is sane
+umask 022
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
@@ -161,42 +165,6 @@
 # Set up logging level
 VERBOSE=$(trueorfalse True $VERBOSE)
 
-
-# Additional repos
-# ================
-
-# Some distros need to add repos beyond the defaults provided by the vendor
-# to pick up required packages.
-
-# The Debian Wheezy official repositories do not contain all required packages,
-# add gplhost repository.
-if [[ "$os_VENDOR" =~ (Debian) ]]; then
-    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
-    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
-    apt_get update
-    apt_get install --force-yes gplhost-archive-keyring
-fi
-
-if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
-    # Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
-    RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"}
-    RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"}
-    if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
-        echo "RDO repo not detected; installing"
-        yum_install $RHEL6_RDO_REPO_RPM || \
-            die $LINENO "Error installing RDO repo, cannot continue"
-    fi
-
-    # RHEL6 requires EPEL for many Open Stack dependencies
-    RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
-    if ! yum repolist enabled epel | grep -q 'epel'; then
-        echo "EPEL not detected; installing"
-        yum_install ${RHEL6_EPEL_RPM} || \
-            die $LINENO "Error installing EPEL repo, cannot continue"
-    fi
-fi
-
-
 # root Access
 # -----------
 
@@ -231,6 +199,47 @@
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
 
+# Additional repos
+# ----------------
+
+# Some distros need to add repos beyond the defaults provided by the vendor
+# to pick up required packages.
+
+# The Debian Wheezy official repositories do not contain all required packages,
+# add gplhost repository.
+if [[ "$os_VENDOR" =~ (Debian) ]]; then
+    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
+    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
+    apt_get update
+    apt_get install --force-yes gplhost-archive-keyring
+fi
+
+if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
+    # Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
+    RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack-havana/rdo-release-havana.rpm"}
+    RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-havana"}
+    if ! sudo yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
+        echo "RDO repo not detected; installing"
+        yum_install $RHEL6_RDO_REPO_RPM || \
+            die $LINENO "Error installing RDO repo, cannot continue"
+    fi
+
+    # RHEL6 requires EPEL for many OpenStack dependencies
+    RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
+    if ! sudo yum repolist enabled epel | grep -q 'epel'; then
+        echo "EPEL not detected; installing"
+        yum_install ${RHEL6_EPEL_RPM} || \
+            die $LINENO "Error installing EPEL repo, cannot continue"
+    fi
+
+    # ... and the rhel-6-server-optional-rpms repo also needs to be enabled
+    is_package_installed yum-utils || install_package yum-utils
+    sudo yum-config-manager --enable rhel-6-server-optional-rpms
+
+fi
+
+# Filesystem setup
+# ----------------
 
 # Create the destination directory and ensure it is writable by the user
 # and read/executable by everybody for daemons (e.g. apache run for horizon)
@@ -248,6 +257,15 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+safe_chown -R $STACK_USER $DATA_DIR
+
+
+# Common Configuration
+# --------------------
+
 # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
 # Internet access. ``stack.sh`` must have been previously run with Internet
 # access to install prerequisites and fetch repositories.
@@ -261,15 +279,6 @@
 # Whether to enable the debug log level in OpenStack services
 ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL`
 
-# Destination path for service data
-DATA_DIR=${DATA_DIR:-${DEST}/data}
-sudo mkdir -p $DATA_DIR
-safe_chown -R $STACK_USER $DATA_DIR
-
-
-# Common Configuration
-# ====================
-
 # Set fixed and floating range here so we can make sure not to use addresses
 # from either range when attempting to guess the IP to use for the host.
 # Note that setting FIXED_RANGE may be necessary when running DevStack
@@ -294,12 +303,8 @@
 SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
 SYSLOG_PORT=${SYSLOG_PORT:-516}
 
-# Enable sysstat logging
-SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
-SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
-
-PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"}
-PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"}
+# for DSTAT logging
+DSTAT_FILE=${DSTAT_FILE:-"dstat.txt"}
 
 # Use color for logging output (only available if syslog is not used)
 LOG_COLOR=`trueorfalse True $LOG_COLOR`
@@ -336,7 +341,6 @@
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/ldap
-source $TOP_DIR/lib/ironic
 
 # Extras Source
 # --------------
@@ -363,7 +367,11 @@
     var=$1; msg=$2
     pw=${!var}
 
-    localrc=$TOP_DIR/localrc
+    if [[ -f $RC_DIR/localrc ]]; then
+        localrc=$TOP_DIR/localrc
+    else
+        localrc=$TOP_DIR/.localrc.auto
+    fi
 
     # If the password is not defined yet, proceed to prompt user for a password.
     if [ ! $pw ]; then
@@ -461,7 +469,7 @@
 # -----------------
 
 # Draw a spinner so the user knows something is happening
-function spinner() {
+function spinner {
     local delay=0.75
     local spinstr='/-\|'
     printf "..." >&3
@@ -476,7 +484,7 @@
 
 # Echo text to the log file, summary log file and stdout
 # echo_summary "something to say"
-function echo_summary() {
+function echo_summary {
     if [[ -t 3 && "$VERBOSE" != "True" ]]; then
         kill >/dev/null 2>&1 $LAST_SPINNER_PID
         if [ ! -z "$LAST_SPINNER_PID" ]; then
@@ -492,7 +500,7 @@
 
 # Echo text only to stdout, no log files
 # echo_nolog "something not for the logs"
-function echo_nolog() {
+function echo_nolog {
     echo $@ >&3
 }
 
@@ -523,15 +531,17 @@
     exec 3>&1
     if [[ "$VERBOSE" == "True" ]]; then
         # Redirect stdout/stderr to tee to write the log file
-        exec 1> >( awk '
+        exec 1> >( awk -v logfile=${LOGFILE} '
+                /((set \+o$)|xtrace)/ { next }
                 {
-                    cmd ="date +\"%Y-%m-%d %H:%M:%S \""
+                    cmd ="date +\"%Y-%m-%d %H:%M:%S.%3N | \""
                     cmd | getline now
-                    close("date +\"%Y-%m-%d %H:%M:%S \"")
+                    close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"")
                     sub(/^/, now)
                     print
-                    fflush()
-                }' | tee "${LOGFILE}" ) 2>&1
+                    print > logfile
+                    fflush("")
+                }' ) 2>&1
         # Set up a second fd for output
         exec 6> >( tee "${SUMFILE}" )
     else
@@ -579,24 +589,33 @@
 # -----------------------
 
 # Kill background processes on exit
-trap clean EXIT
-clean() {
+trap exit_trap EXIT
+function exit_trap {
     local r=$?
-    kill >/dev/null 2>&1 $(jobs -p)
+    jobs=$(jobs -p)
+    if [[ -n $jobs ]]; then
+        echo "exit_trap: cleaning up child processes"
+        kill 2>&1 $jobs
+    fi
     exit $r
 }
 
-
 # Exit on any errors so that errors don't compound
-trap failed ERR
-failed() {
+trap err_trap ERR
+function err_trap {
     local r=$?
-    kill >/dev/null 2>&1 $(jobs -p)
     set +o xtrace
-    [ -n "$LOGFILE" ] && echo "${0##*/} failed: full log in $LOGFILE"
+    if [[ -n "$LOGFILE" ]]; then
+        echo "${0##*/} failed: full log in $LOGFILE"
+    else
+        echo "${0##*/} failed"
+    fi
     exit $r
 }
 
+
+set -o errexit
+
 # Print the commands being run so that we can see the command that triggers
 # an error.  It is also useful for following along as the install occurs.
 set -o xtrace
@@ -746,11 +765,6 @@
     # don't be naive and add to existing line!
 fi
 
-if is_service_enabled ir-api ir-cond; then
-    install_ironic
-    install_ironicclient
-    configure_ironic
-fi
 
 # Extras Install
 # --------------
@@ -862,36 +876,17 @@
 # Initialize the directory for service status check
 init_service_check
 
-
-# Sysstat
+# Dstat
 # -------
 
-# If enabled, systat has to start early to track OpenStack service startup.
-if is_service_enabled sysstat; then
-    # what we want to measure
-    # -u : cpu statitics
-    # -q : load
-    # -b : io load rates
-    # -w : process creation and context switch rates
-    SYSSTAT_OPTS="-u -q -b -w"
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
-    else
-        screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL"
-    fi
+# A better kind of sysstat, with the top process per time slice
+DSTAT_OPTS="-tcndylp --top-cpu-adv"
+if [[ -n ${SCREEN_LOGDIR} ]]; then
+    screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
+else
+    screen_it dstat "dstat $DSTAT_OPTS"
 fi
 
-if is_service_enabled pidstat; then
-    # Per-process stats
-    PIDSTAT_OPTS="-l -p ALL -T ALL"
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE"
-    else
-        screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL"
-    fi
-fi
-
-
 # Start Services
 # ==============
 
@@ -915,6 +910,9 @@
     # Do the keystone-specific bits from keystone_data.sh
     export OS_SERVICE_TOKEN=$SERVICE_TOKEN
     export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
+    # Add temporarily to make openstackclient work
+    export OS_TOKEN=$SERVICE_TOKEN
+    export OS_URL=$SERVICE_ENDPOINT
     create_keystone_accounts
     create_nova_accounts
     create_cinder_accounts
@@ -928,6 +926,10 @@
         create_swift_accounts
     fi
 
+    if is_service_enabled heat; then
+        create_heat_accounts
+    fi
+
     # ``keystone_data.sh`` creates services, admin and demo users, and roles.
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
@@ -937,6 +939,7 @@
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
+    unset OS_TOKEN OS_URL
     export OS_AUTH_URL=$SERVICE_ENDPOINT
     export OS_TENANT_NAME=admin
     export OS_USERNAME=admin
@@ -966,15 +969,6 @@
 fi
 
 
-# Ironic
-# ------
-
-if is_service_enabled ir-api ir-cond; then
-    echo_summary "Configuring Ironic"
-    init_ironic
-fi
-
-
 # Neutron
 # -------
 
@@ -1096,15 +1090,50 @@
 fi
 
 # Launch the Glance services
-if is_service_enabled g-api g-reg; then
+if is_service_enabled glance; then
     echo_summary "Starting Glance"
     start_glance
 fi
 
-# Launch the Ironic services
-if is_service_enabled ir-api ir-cond; then
-    echo_summary "Starting Ironic"
-    start_ironic
+# Install Images
+# ==============
+
+# Upload an image to glance.
+#
+# The default image is cirros, a small testing image which lets you login as **root**.
+# cirros has a ``cloud-init`` analog supporting login via keypair and sending
+# scripts as userdata.
+# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
+#
+# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
+#  * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
+
+if is_service_enabled g-reg; then
+    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
+
+    if is_baremetal; then
+        echo_summary "Creating and uploading baremetal images"
+
+        # build and upload separate deploy kernel & ramdisk
+        upload_baremetal_deploy $TOKEN
+
+        # upload images, separating out the kernel & ramdisk for PXE boot
+        for image_url in ${IMAGE_URLS//,/ }; do
+            upload_baremetal_image $image_url $TOKEN
+        done
+    else
+        echo_summary "Uploading images"
+
+        # Option to upload legacy ami-tty, which works with xenserver
+        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
+            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
+        fi
+
+        for image_url in ${IMAGE_URLS//,/ }; do
+            upload_image $image_url $TOKEN
+        done
+    fi
 fi
 
 # Create an access key and secret key for nova ec2 register image
@@ -1124,8 +1153,8 @@
 # Create a randomized default value for the keymgr's fixed_key
 if is_service_enabled nova; then
     FIXED_KEY=""
-    for i in $(seq 1 64);
-        do FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
+    for i in $(seq 1 64); do
+        FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc);
     done;
     iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
 fi
@@ -1186,7 +1215,7 @@
 
 # Configure and launch heat engine, api and metadata
 if is_service_enabled heat; then
-    # Initialize heat, including replacing nova flavors
+    # Initialize heat
     echo_summary "Configuring Heat"
     init_heat
     echo_summary "Starting Heat"
@@ -1212,47 +1241,6 @@
 fi
 
 
-# Install Images
-# ==============
-
-# Upload an image to glance.
-#
-# The default image is cirros, a small testing image which lets you login as **root**
-# cirros has a ``cloud-init`` analog supporting login via keypair and sending
-# scripts as userdata.
-# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
-#
-# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
-#  * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
-
-if is_service_enabled g-reg; then
-    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO TOKEN "Keystone fail to get token"
-
-    if is_baremetal; then
-        echo_summary "Creating and uploading baremetal images"
-
-        # build and upload separate deploy kernel & ramdisk
-        upload_baremetal_deploy $TOKEN
-
-        # upload images, separating out the kernel & ramdisk for PXE boot
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_baremetal_image $image_url $TOKEN
-        done
-    else
-        echo_summary "Uploading images"
-
-        # Option to upload legacy ami-tty, which works with xenserver
-        if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-            IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-        fi
-
-        for image_url in ${IMAGE_URLS//,/ }; do
-            upload_image $image_url $TOKEN
-        done
-    fi
-fi
-
 # If we are running nova with baremetal driver, there are a few
 # last-mile configuration bits to attend to, which must happen
 # after n-api and n-sch have started.
@@ -1355,11 +1343,6 @@
     echo "Horizon is now available at http://$SERVICE_HOST/"
 fi
 
-# Warn that the default flavors have been changed by Heat
-if is_service_enabled heat; then
-    echo "Heat has replaced the default flavors. View by running: nova flavor-list"
-fi
-
 # If Keystone is present you can point ``nova`` cli to this server
 if is_service_enabled key; then
     echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
diff --git a/stackrc b/stackrc
index e89d25e..f235ccc 100644
--- a/stackrc
+++ b/stackrc
@@ -35,7 +35,18 @@
 #  enable_service neutron
 #  # Optional, to enable tempest configuration as part of devstack
 #  enable_service tempest
-ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql
+
+# core compute (glance / keystone / nova (+ nova-network))
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth
+# cinder
+ENABLED_SERVICES+=,c-sch,c-api,c-vol
+# heat
+ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw
+# dashboard
+ENABLED_SERVICES+=,horizon
+# additional services
+ENABLED_SERVICES+=,rabbit,tempest,mysql
+
 
 # Tell Tempest which services are available.  The default is set here as
 # Tempest falls late in the configuration sequence.  This differs from
@@ -69,6 +80,17 @@
 # (currently only implemented for MySQL backend)
 DATABASE_QUERY_LOGGING=$(trueorfalse True $DATABASE_QUERY_LOGGING)
 
+# Set a timeout for git operations.  If git is still running when the
+# timeout expires, the command will be retried up to 3 times.  This is
+# in the format for timeout(1):
+#
+#  DURATION is a floating point number with an optional suffix: 's'
+#  for seconds (the default), 'm' for minutes, 'h' for hours or 'd'
+#  for days.
+#
+# Zero disables timeouts
+GIT_TIMEOUT=${GIT_TIMEOUT:-0}
+
 # Repositories
 # ------------
 
@@ -140,6 +162,10 @@
 OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
 OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
 
+# cliff command line framework
+CLIFF_REPO=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git}
+CLIFF_BRANCH=${CLIFF_BRANCH:-master}
+
 # oslo.config
 OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
 OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
@@ -152,6 +178,22 @@
 OSLORWRAP_REPO=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git}
 OSLORWRAP_BRANCH=${OSLORWRAP_BRANCH:-master}
 
+# oslo.vmware
+OSLOVMWARE_REPO=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
+OSLOVMWARE_BRANCH=${OSLOVMWARE_BRANCH:-master}
+
+# pycadf auditing library
+PYCADF_REPO=${PYCADF_REPO:-${GIT_BASE}/openstack/pycadf.git}
+PYCADF_BRANCH=${PYCADF_BRANCH:-master}
+
+# stevedore plugin manager
+STEVEDORE_REPO=${STEVEDORE_REPO:-${GIT_BASE}/openstack/stevedore.git}
+STEVEDORE_BRANCH=${STEVEDORE_BRANCH:-master}
+
+# taskflow task and flow execution library
+TASKFLOW_REPO=${TASKFLOW_REPO:-${GIT_BASE}/openstack/taskflow.git}
+TASKFLOW_BRANCH=${TASKFLOW_BRANCH:-master}
+
 # pbr drives the setuptools configs
 PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
 PBR_BRANCH=${PBR_BRANCH:-master}
@@ -171,7 +213,7 @@
 # storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git}
+SWIFT3_REPO=${SWIFT3_REPO:-http://github.com/fujita/swift3.git}
 SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
 
 # python swift client library
@@ -245,6 +287,10 @@
     xenserver)
         # Xen config common to nova and neutron
         XENAPI_USER=${XENAPI_USER:-"root"}
+        # This user will be used for dom0 - domU communication
+        #   should be able to log in to dom0 without a password
+        #   will be used to install the plugins
+        DOMZERO_USER=${DOMZERO_USER:-"domzero"}
         ;;
     *)
         ;;
@@ -280,6 +326,9 @@
     openvz)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
+    docker)
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros}
+        IMAGE_URLS=${IMAGE_URLS:-};;
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
diff --git a/tests/functions.sh b/tests/functions.sh
index 95dafe1..874d022 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -42,15 +42,14 @@
 
 echo "Testing enable_service()"
 
-function test_enable_service() {
+function test_enable_service {
     local start="$1"
     local add="$2"
     local finish="$3"
 
     ENABLED_SERVICES="$start"
     enable_service $add
-    if [ "$ENABLED_SERVICES" = "$finish" ]
-    then
+    if [ "$ENABLED_SERVICES" = "$finish" ]; then
         echo "OK: $start + $add -> $ENABLED_SERVICES"
     else
         echo "changing $start to $finish with $add failed: $ENABLED_SERVICES"
@@ -69,15 +68,14 @@
 test_enable_service 'a,b,-c' c 'a,b'
 test_enable_service 'a,b,c' -c 'a,b'
 
-function test_disable_service() {
+function test_disable_service {
     local start="$1"
     local del="$2"
     local finish="$3"
 
     ENABLED_SERVICES="$start"
     disable_service "$del"
-    if [ "$ENABLED_SERVICES" = "$finish" ]
-    then
+    if [ "$ENABLED_SERVICES" = "$finish" ]; then
         echo "OK: $start - $del -> $ENABLED_SERVICES"
     else
         echo "changing $start to $finish with $del failed: $ENABLED_SERVICES"
@@ -102,8 +100,7 @@
 ENABLED_SERVICES=a,b,c
 disable_all_services
 
-if [[ -z "$ENABLED_SERVICES" ]]
-then
+if [[ -z "$ENABLED_SERVICES" ]]; then
     echo "OK"
 else
     echo "disabling all services FAILED: $ENABLED_SERVICES"
@@ -112,14 +109,13 @@
 echo "Testing disable_negated_services()"
 
 
-function test_disable_negated_services() {
+function test_disable_negated_services {
     local start="$1"
     local finish="$2"
 
     ENABLED_SERVICES="$start"
     disable_negated_services
-    if [ "$ENABLED_SERVICES" = "$finish" ]
-    then
+    if [ "$ENABLED_SERVICES" = "$finish" ]; then
         echo "OK: $start + $add -> $ENABLED_SERVICES"
     else
         echo "changing $start to $finish failed: $ENABLED_SERVICES"
diff --git a/tests/test_config.sh b/tests/test_config.sh
index 39603c9..5700f8d 100755
--- a/tests/test_config.sh
+++ b/tests/test_config.sh
@@ -12,7 +12,7 @@
 
 # check_result() tests and reports the result values
 # check_result "actual" "expected"
-function check_result() {
+function check_result {
     local actual=$1
     local expected=$2
     if [[ "$actual" == "$expected" ]]; then
@@ -26,7 +26,7 @@
 type=new
 multi = foo2"
 
-function create_test1c() {
+function create_test1c {
     cat >test1c.conf <<EOF
 [eee]
 # original comment
@@ -34,7 +34,7 @@
 EOF
 }
 
-function create_test2a() {
+function create_test2a {
     cat >test2a.conf <<EOF
 [ddd]
 # original comment
diff --git a/tools/bash8.py b/tools/bash8.py
index 2623358..3abf87b 100755
--- a/tools/bash8.py
+++ b/tools/bash8.py
@@ -21,9 +21,21 @@
 # Currently Supported checks
 #
 # Errors
+# Basic white space errors, for consistent indenting
 # - E001: check that lines do not end with trailing whitespace
 # - E002: ensure that indents are only spaces, and not hard tabs
 # - E003: ensure all indents are a multiple of 4 spaces
+# - E004: file did not end with a newline
+#
+# Structure errors
+#
+# A set of rules that help keep things consistent in control blocks.
+# These are ignored on long lines that have a continuation, because
+# unrolling that is kind of "interesting"
+#
+# - E010: *do* not on the same line as *for*
+# - E011: *then* not on the same line as *if*
+# - E012: heredoc didn't end before EOF
 
 import argparse
 import fileinput
@@ -37,18 +49,44 @@
 def register_ignores(ignores):
     global IGNORE
     if ignores:
-        IGNORE='^(' + '|'.join(ignores.split(',')) + ')'
+        IGNORE = '^(' + '|'.join(ignores.split(',')) + ')'
 
 
 def should_ignore(error):
     return IGNORE and re.search(IGNORE, error)
 
 
-def print_error(error, line):
+def print_error(error, line,
+                filename=None, filelineno=None):
+    if not filename:
+        filename = fileinput.filename()
+    if not filelineno:
+        filelineno = fileinput.filelineno()
     global ERRORS
     ERRORS = ERRORS + 1
     print("%s: '%s'" % (error, line.rstrip('\n')))
-    print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno()))
+    print(" - %s: L%s" % (filename, filelineno))
+
+
+def not_continuation(line):
+    return not re.search('\\\\$', line)
+
+
+def check_for_do(line):
+    if not_continuation(line):
+        match = re.match('^\s*(for|while|until)\s', line)
+        if match:
+            operator = match.group(1).strip()
+            if not re.search(r';\s*do(\b|$)', line):
+                print_error('E010: Do not on same line as %s' % operator,
+                            line)
+
+
+def check_if_then(line):
+    if not_continuation(line):
+        if re.search('^\s*if \[', line):
+            if not re.search(r';\s*then(\b|$)', line):
+                print_error('E011: Then not on same line as if', line)
 
 
 def check_no_trailing_whitespace(line):
@@ -64,6 +102,21 @@
         if (len(m.group('indent')) % 4) != 0:
             print_error('E003: Indent not multiple of 4', line)
 
+def check_function_decl(line):
+    failed = False
+    if line.startswith("function"):
+        if not re.search('^function [\w-]* \{$', line):
+            failed = True
+    else:
+        # catch the case without "function", e.g.
+        # things like '^foo() {'
+        if re.search(r'^\s*?[\w-]+\(\)\s*?\{', line):
+            failed = True
+
+    if failed:
+        print_error('E020: Function declaration not in format '
+                    ' "^function name {$"', line)
+
 
 def starts_multiline(line):
     m = re.search("[^<]<<\s*(?P<token>\w+)", line)
@@ -79,17 +132,46 @@
     return False
 
 
-def check_files(files):
+def check_files(files, verbose):
     in_multiline = False
+    multiline_start = 0
+    multiline_line = ""
     logical_line = ""
     token = False
+    prev_file = None
+    prev_line = ""
+    prev_lineno = 0
+
     for line in fileinput.input(files):
+        if fileinput.isfirstline():
+            # if in_multiline when the new file starts then we didn't
+            # find the end of a heredoc in the last file.
+            if in_multiline:
+                print_error('E012: heredoc did not end before EOF',
+                            multiline_line,
+                            filename=prev_file, filelineno=multiline_start)
+                in_multiline = False
+
+            # last line of a previous file should always end with a
+            # newline
+            if prev_file and not prev_line.endswith('\n'):
+                print_error('E004: file did not end with a newline',
+                            prev_line,
+                            filename=prev_file, filelineno=prev_lineno)
+
+            prev_file = fileinput.filename()
+
+            if verbose:
+                print("Running bash8 on %s" % fileinput.filename())
+
         # NOTE(sdague): multiline processing of heredocs is interesting
         if not in_multiline:
             logical_line = line
             token = starts_multiline(line)
             if token:
                 in_multiline = True
+                multiline_start = fileinput.filelineno()
+                multiline_line = line
                 continue
         else:
             logical_line = logical_line + line
@@ -100,7 +182,12 @@
 
         check_no_trailing_whitespace(logical_line)
         check_indents(logical_line)
+        check_for_do(logical_line)
+        check_if_then(logical_line)
+        check_function_decl(logical_line)
 
+        prev_line = logical_line
+        prev_lineno = fileinput.filelineno()
 
 def get_options():
     parser = argparse.ArgumentParser(
@@ -108,13 +195,14 @@
     parser.add_argument('files', metavar='file', nargs='+',
                         help='files to scan for errors')
     parser.add_argument('-i', '--ignore', help='Rules to ignore')
+    parser.add_argument('-v', '--verbose', action='store_true', default=False)
     return parser.parse_args()
 
 
 def main():
     opts = get_options()
     register_ignores(opts.ignore)
-    check_files(opts.files)
+    check_files(opts.files, opts.verbose)
 
     if ERRORS > 0:
         print("%d bash8 error(s) found" % ERRORS)
diff --git a/tools/build_pxe_env.sh b/tools/build_pxe_env.sh
index e6f98b4..50d91d0 100755
--- a/tools/build_pxe_env.sh
+++ b/tools/build_pxe_env.sh
@@ -17,7 +17,7 @@
 PROGDIR=`dirname $0`
 
 # Clean up any resources that may be in use
-cleanup() {
+function cleanup {
     set +o errexit
 
     # Mop up temporary files
diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh
index 7372555..50ba8ef 100755
--- a/tools/build_ramdisk.sh
+++ b/tools/build_ramdisk.sh
@@ -14,7 +14,7 @@
 fi
 
 # Clean up any resources that may be in use
-cleanup() {
+function cleanup {
     set +o errexit
 
     # Mop up temporary files
@@ -87,7 +87,7 @@
 # Finds and returns full device path for the next available NBD device.
 # Exits script if error connecting or none free.
 # map_nbd image
-function map_nbd() {
+function map_nbd {
     for i in `seq 0 15`; do
         if [ ! -e /sys/block/nbd$i/pid ]; then
             NBD=/dev/nbd$i
diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh
deleted file mode 100755
index 6c527f5..0000000
--- a/tools/build_tempest.sh
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env bash
-#
-# **build_tempest.sh**
-
-# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git
-
-function usage {
-    echo "$0 - Check out and prepare a Tempest repo"
-    echo ""
-    echo "Usage: $0"
-    exit 1
-}
-
-if [ "$1" = "-h" ]; then
-    usage
-fi
-
-# Clean up any resources that may be in use
-cleanup() {
-    set +o errexit
-
-    # Kill ourselves to signal any calling process
-    trap 2; kill -2 $$
-}
-
-trap cleanup SIGHUP SIGINT SIGTERM SIGQUIT EXIT
-
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
-
-# Import common functions
-. $TOP_DIR/functions
-
-# Abort if localrc is not set
-if [ ! -e $TOP_DIR/localrc ]; then
-    echo "You must have a localrc with ALL necessary passwords and configuration defined before proceeding."
-    echo "See stack.sh for required passwords."
-    exit 1
-fi
-
-# Source params
-source ./stackrc
-
-# Where Openstack code lives
-DEST=${DEST:-/opt/stack}
-
-TEMPEST_DIR=$DEST/tempest
-
-# Install tests and prerequisites
-git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-
-trap - SIGHUP SIGINT SIGTERM SIGQUIT EXIT
diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh
index 3ab5daf..5f3acc5 100755
--- a/tools/build_uec_ramdisk.sh
+++ b/tools/build_uec_ramdisk.sh
@@ -20,7 +20,7 @@
 fi
 
 # Clean up resources that may be in use
-cleanup() {
+function cleanup {
     set +o errexit
 
     if [ -n "$MNT_DIR" ]; then
diff --git a/tools/build_usb_boot.sh b/tools/build_usb_boot.sh
index 8566229..c97e0a1 100755
--- a/tools/build_usb_boot.sh
+++ b/tools/build_usb_boot.sh
@@ -13,7 +13,7 @@
 PXEDIR=${PXEDIR:-/opt/ramstack/pxe}
 
 # Clean up any resources that may be in use
-cleanup() {
+function cleanup {
     set +o errexit
 
     # Mop up temporary files
diff --git a/tools/copy_dev_environment_to_uec.sh b/tools/copy_dev_environment_to_uec.sh
index 3fd4423..94a4926 100755
--- a/tools/copy_dev_environment_to_uec.sh
+++ b/tools/copy_dev_environment_to_uec.sh
@@ -22,7 +22,7 @@
 source ./stackrc
 
 # Echo usage
-usage() {
+function usage {
     echo "Add stack user and keys"
     echo ""
     echo "Usage: $0 [full path to raw uec base image]"
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 50f6592..9c29ecd 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -15,6 +15,7 @@
 # and it was time for this nonsense to stop.  Run this script as root to create
 # the user and configure sudo.
 
+set -o errexit
 
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
@@ -27,12 +28,14 @@
 # and ``DISTRO``
 GetDistro
 
-# Needed to get ``ENABLED_SERVICES``
+# Needed to get ``ENABLED_SERVICES`` and ``STACK_USER``
 source $TOP_DIR/stackrc
 
 # Give the non-root user the ability to run as **root** via ``sudo``
 is_package_installed sudo || install_package sudo
 
+[[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting."
+
 if ! getent group $STACK_USER >/dev/null; then
     echo "Creating a group called $STACK_USER"
     groupadd $STACK_USER
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 5f4c486..47da334 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -11,8 +11,7 @@
 
 ACCOUNT_DIR=./accrc
 
-display_help()
-{
+function display_help {
 cat <<EOF
 
 usage: $0 <options..>
@@ -54,9 +53,7 @@
 EOF
 }
 
-if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@")
-then
-    #parse error
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@"); then
     display_help
     exit 1
 fi
@@ -71,8 +68,7 @@
 ROLE=Member
 USER_NAME=""
 USER_PASS=""
-while [ $# -gt 0 ]
-do
+while [ $# -gt 0 ]; do
     case "$1" in
     -h|--help) display_help; exit 0 ;;
     --os-username) export OS_USERNAME=$2; shift ;;
@@ -154,7 +150,7 @@
 fi
 
 
-function add_entry(){
+function add_entry {
     local user_id=$1
     local user_name=$2
     local tenant_id=$3
@@ -200,7 +196,7 @@
 export S3_URL="$S3_URL"
 # OpenStack USER ID = $user_id
 export OS_USERNAME="$user_name"
-# Openstack Tenant ID = $tenant_id
+# OpenStack Tenant ID = $tenant_id
 export OS_TENANT_NAME="$tenant_name"
 export OS_AUTH_URL="$OS_AUTH_URL"
 export OS_CACERT="$OS_CACERT"
@@ -216,7 +212,7 @@
 }
 
 #admin users expected
-function create_or_get_tenant(){
+function create_or_get_tenant {
     local tenant_name=$1
     local tenant_id=`keystone tenant-list | awk '/\|[[:space:]]*'"$tenant_name"'[[:space:]]*\|.*\|/ {print $2}'`
     if [ -n "$tenant_id" ]; then
@@ -226,7 +222,7 @@
     fi
 }
 
-function create_or_get_role(){
+function create_or_get_role {
     local role_name=$1
     local role_id=`keystone role-list| awk '/\|[[:space:]]*'"$role_name"'[[:space:]]*\|/ {print $2}'`
     if [ -n "$role_id" ]; then
@@ -237,7 +233,7 @@
 }
 
 # Provides empty string when the user does not exists
-function get_user_id(){
+function get_user_id {
     local user_name=$1
     keystone user-list | awk '/^\|[^|]*\|[[:space:]]*'"$user_name"'[[:space:]]*\|.*\|/ {print $2}'
 }
@@ -254,6 +250,14 @@
             if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then
                 continue;
             fi
+
+            # Checks for a specific password defined for an user.
+            # Example for an username johndoe:
+            #                     JOHNDOE_PASSWORD=1234
+            eval SPECIFIC_UPASSWORD="\$${USER_NAME^^}_PASSWORD"
+            if [ -n "$SPECIFIC_UPASSWORD" ]; then
+                USER_PASS=$SPECIFIC_UPASSWORD
+            fi
             add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS"
         done
     done
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
index 4fa2386..27c8c82 100755
--- a/tools/docker/install_docker.sh
+++ b/tools/docker/install_docker.sh
@@ -30,15 +30,19 @@
 # Install Docker Service
 # ======================
 
-# Stop the auto-repo updates and do it when required here
-NO_UPDATE_REPOS=True
+if is_fedora; then
+    install_package docker-io socat
+else
+    # Stop the auto-repo updates and do it when required here
+    NO_UPDATE_REPOS=True
 
-# Set up home repo
-curl https://get.docker.io/gpg | sudo apt-key add -
-install_package python-software-properties && \
-    sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
-apt_get update
-install_package --force-yes lxc-docker socat
+    # Set up home repo
+    curl https://get.docker.io/gpg | sudo apt-key add -
+    install_package python-software-properties && \
+        sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
+    apt_get update
+    install_package --force-yes lxc-docker socat
+fi
 
 # Start the daemon - restart just in case the package ever auto-starts...
 restart_service docker
@@ -60,5 +64,5 @@
 docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME
 
 # Get docker-registry image
-docker pull $REGISTRY_IMAGE
-docker tag $REGISTRY_IMAGE $REGISTRY_IMAGE_NAME
+docker pull $DOCKER_REGISTRY_IMAGE
+docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index a28e10e..7833278 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -40,7 +40,7 @@
 # ---------------
 
 # get_package_path python-package    # in import notation
-function get_package_path() {
+function get_package_path {
     local package=$1
     echo $(python -c "import os; import $package; print(os.path.split(os.path.realpath($package.__file__))[0])")
 }
@@ -70,7 +70,8 @@
 fi
 
 # Ubuntu 12.04
-# -----
+# ------------
+
 # We can regularly get kernel crashes on the 12.04 default kernel, so attempt
 # to install a new kernel
 if [[ ${DISTRO} =~ (precise) ]]; then
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index da13f4b..225742c 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -18,7 +18,7 @@
 set -o errexit
 set -o xtrace
 
-usage() {
+function usage {
     echo "Usage: $0 - Download and prepare Ubuntu UEC images"
     echo ""
     echo "$0 [-r rootsize] release imagefile [kernel]"
@@ -31,7 +31,7 @@
 }
 
 # Clean up any resources that may be in use
-cleanup() {
+function cleanup {
     set +o errexit
 
     # Mop up temporary files
diff --git a/tools/info.sh b/tools/info.sh
index 3ab7966..a8f9544 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -61,7 +61,7 @@
 # -----
 
 # git_report <dir>
-function git_report() {
+function git_report {
     local dir=$1
     local proj ref branch head
     if [[ -d $dir/.git ]]; then
@@ -122,13 +122,11 @@
             ver=${BASH_REMATCH[2]}
         else
             # Unhandled format in freeze file
-            #echo "unknown: $p"
             continue
         fi
         echo "pip|${p}|${ver}"
     else
         # No match in freeze file
-        #echo "unknown: $p"
         continue
     fi
 done <$FREEZE_FILE
diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh
index 2f52aa1..9a4f036 100755
--- a/tools/install_openvpn.sh
+++ b/tools/install_openvpn.sh
@@ -22,7 +22,7 @@
 fi
 
 # Do some IP manipulation
-function cidr2netmask() {
+function cidr2netmask {
     set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0
     if [[ $1 -gt 1 ]]; then
         shift $1
@@ -50,7 +50,7 @@
 VPN_DIR=/etc/openvpn
 CA_DIR=$VPN_DIR/easy-rsa
 
-usage() {
+function usage {
     echo "$0 - OpenVPN install and certificate generation"
     echo ""
     echo "$0 --client name"
@@ -102,7 +102,7 @@
     openvpn --genkey --secret $CA_DIR/keys/ta.key  ## Build a TLS key
 fi
 
-do_server() {
+function do_server {
     NAME=$1
     # Generate server certificate
     $CA_DIR/pkitool --server $NAME
@@ -162,7 +162,7 @@
     /etc/init.d/openvpn restart
 }
 
-do_client() {
+function do_client {
     NAME=$1
     # Generate a client certificate
     $CA_DIR/pkitool $NAME
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index d714d33..9fa161e 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -50,7 +50,7 @@
 GetDistro
 echo "Distro: $DISTRO"
 
-function get_versions() {
+function get_versions {
     PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true)
     if [[ -n $PIP ]]; then
         PIP_VERSION=$($PIP --version | awk '{ print $2}')
@@ -61,7 +61,7 @@
 }
 
 
-function install_get_pip() {
+function install_get_pip {
     if [[ ! -r $FILES/get-pip.py ]]; then
         (cd $FILES; \
             curl -O $PIP_GET_PIP_URL; \
@@ -70,7 +70,7 @@
     sudo -E python $FILES/get-pip.py
 }
 
-function install_pip_tarball() {
+function install_pip_tarball {
     (cd $FILES; \
         curl -O $PIP_TAR_URL; \
         tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \
diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md
index 371017d..3586da9 100644
--- a/tools/jenkins/README.md
+++ b/tools/jenkins/README.md
@@ -1,6 +1,6 @@
 Getting Started With Jenkins and Devstack
 =========================================
-This little corner of devstack is to show how to get an Openstack jenkins
+This little corner of devstack is to show how to get an OpenStack jenkins
 environment up and running quickly, using the rcb configuration methodology.
 
 
diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh
index e295ef2..64ee159 100755
--- a/tools/jenkins/build_configuration.sh
+++ b/tools/jenkins/build_configuration.sh
@@ -5,7 +5,7 @@
 ADAPTER=$3
 RC=$4
 
-function usage() {
+function usage {
     echo "Usage: $0 -  Build a configuration"
     echo ""
     echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh
index d9a160a..6927fd7 100755
--- a/tools/jenkins/configurations/kvm.sh
+++ b/tools/jenkins/configurations/kvm.sh
@@ -9,7 +9,7 @@
 ADAPTER=$3
 RC=$4
 
-function usage() {
+function usage {
     echo "Usage: $0 - Build a test configuration"
     echo ""
     echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh
index 864f949..7b671e9 100755
--- a/tools/jenkins/configurations/xs.sh
+++ b/tools/jenkins/configurations/xs.sh
@@ -8,7 +8,7 @@
 ADAPTER=$3
 RC=$4
 
-function usage() {
+function usage {
     echo "Usage: $0 - Build a test configuration"
     echo ""
     echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh
index 4649563..d2b8284 100755
--- a/tools/jenkins/run_test.sh
+++ b/tools/jenkins/run_test.sh
@@ -4,7 +4,7 @@
 ADAPTER=$2
 RC=$3
 
-function usage() {
+function usage {
     echo "Usage: $0 - Run a test"
     echo ""
     echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]"
diff --git a/tools/sar_filter.py b/tools/sar_filter.py
deleted file mode 100755
index 24ef0e4..0000000
--- a/tools/sar_filter.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2014 Samsung Electronics Corp. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import re
-import subprocess
-import sys
-
-
-def is_data_line(line):
-    timestamp, data = parse_line(line)
-    return re.search('\d\.d', data)
-
-
-def parse_line(line):
-    m = re.search('(\d\d:\d\d:\d\d( \w\w)?)(\s+((\S+)\s*)+)', line)
-    if m:
-        date = m.group(1)
-        data = m.group(3).rstrip()
-        return date, data
-    else:
-        return None, None
-
-
-process = subprocess.Popen(
-    "sar %s" % " ".join(sys.argv[1:]),
-    shell=True,
-    stdout=subprocess.PIPE,
-    stderr=subprocess.STDOUT)
-
-# Poll process for new output until finished
-
-start_time = ""
-header = ""
-data_line = ""
-printed_header = False
-current_ts = None
-
-# print out the first sysstat line regardless
-print process.stdout.readline()
-
-while True:
-    nextline = process.stdout.readline()
-    if nextline == '' and process.poll() is not None:
-        break
-
-    date, data = parse_line(nextline)
-    # stop until we get to the first set of real lines
-    if not date:
-        continue
-
-    # now we eat the header lines, and only print out the header
-    # if we've never seen them before
-    if not start_time:
-        start_time = date
-        header += "%s   %s" % (date, data)
-    elif date == start_time:
-        header += "   %s" % data
-    elif not printed_header:
-        printed_header = True
-        print header
-
-    # now we know this is a data line, printing out if the timestamp
-    # has changed, and stacking up otherwise.
-    nextline = process.stdout.readline()
-    date, data = parse_line(nextline)
-    if date != current_ts:
-        current_ts = date
-        print data_line
-        data_line = "%s   %s" % (date, data)
-    else:
-        data_line += "   %s" % data
-
-    sys.stdout.flush()
diff --git a/tools/warm_apts_for_uec.sh b/tools/warm_apts_for_uec.sh
index 3c15f52..c57fc2e 100755
--- a/tools/warm_apts_for_uec.sh
+++ b/tools/warm_apts_for_uec.sh
@@ -16,7 +16,7 @@
 cd $TOP_DIR
 
 # Echo usage
-usage() {
+function usage {
     echo "Cache OpenStack dependencies on a uec image to speed up performance."
     echo ""
     echo "Usage: $0 [full path to raw uec base image]"
diff --git a/tools/xen/README.md b/tools/xen/README.md
index ee1abcc..712782b 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,11 +1,11 @@
 # Getting Started With XenServer and Devstack
 
 The purpose of the code in this directory it to help developers bootstrap a
-XenServer 6.2 (older versions may also work) + Openstack development
+XenServer 6.2 (older versions may also work) + OpenStack development
 environment. This file gives some pointers on how to get started.
 
 Xenserver is a Type 1 hypervisor, so it is best installed on bare metal.  The
-Openstack services are configured to run within a virtual machine (called OS
+OpenStack services are configured to run within a virtual machine (called OS
 domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with
 the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`).
 
diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh
index 0285f42..0eb2077 100755
--- a/tools/xen/build_domU_multi.sh
+++ b/tools/xen/build_domU_multi.sh
@@ -25,11 +25,5 @@
 # because rabbit won't launch with an ip addr hostname :(
 build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit"
 
-# Wait till the head node is up
-#while ! curl -L http://$HEAD_PUB_IP | grep -q username; do
-#    echo "Waiting for head node ($HEAD_PUB_IP) to start..."
-#    sleep 5
-#done
-
 # Build the HA compute host
 build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api"
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index 958102b..cc3cbe1 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -21,9 +21,19 @@
 # This directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
+# Source lower level functions
+. $TOP_DIR/../../functions
+
 # Include onexit commands
 . $TOP_DIR/scripts/on_exit.sh
 
+# xapi functions
+. $TOP_DIR/functions
+
+# Determine what system we are running on.
+# Might not be XenServer if we're using xenserver-core
+GetDistro
+
 # Source params - override xenrc params in your localrc to suite your taste
 source xenrc
 
@@ -32,7 +42,7 @@
 #
 GUEST_NAME="$1"
 
-function _print_interface_config() {
+function _print_interface_config {
     local device_nr
     local ip_address
     local netmask
@@ -58,7 +68,7 @@
     echo "  post-up ethtool -K $device tx off"
 }
 
-function print_interfaces_config() {
+function print_interfaces_config {
     echo "auto lo"
     echo "iface lo inet loopback"
 
diff --git a/tools/xen/functions b/tools/xen/functions
index 97c56bc..ab0be84 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -336,3 +336,11 @@
     xe vm-param-set uuid=$vm VCPUs-max=$cpu_count
     xe vm-param-set uuid=$vm VCPUs-at-startup=$cpu_count
 }
+
+function get_domid() {
+    local vm_name_label
+
+    vm_name_label="$1"
+
+    xe vm-list name-label="$vm_name_label" params=dom-id minimal=true
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 41b184c..a4b3e06 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -67,21 +67,6 @@
 
 # Install plugins
 
-## Nova plugins
-NOVA_ZIPBALL_URL=${NOVA_ZIPBALL_URL:-$(zip_snapshot_location $NOVA_REPO $NOVA_BRANCH)}
-EXTRACTED_NOVA=$(extract_remote_zipball "$NOVA_ZIPBALL_URL")
-install_xapi_plugins_from "$EXTRACTED_NOVA"
-
-LOGROT_SCRIPT=$(find "$EXTRACTED_NOVA" -name "rotate_xen_guest_logs.sh" -print)
-if [ -n "$LOGROT_SCRIPT" ]; then
-    mkdir -p "/var/log/xen/guest"
-    cp "$LOGROT_SCRIPT" /root/consolelogrotate
-    chmod +x /root/consolelogrotate
-    echo "* * * * * /root/consolelogrotate" | crontab
-fi
-
-rm -rf "$EXTRACTED_NOVA"
-
 ## Install the netwrap xapi plugin to support agent control of dom0 networking
 if [[ "$ENABLED_SERVICES" =~ "q-agt" && "$Q_PLUGIN" = "openvswitch" ]]; then
     NEUTRON_ZIPBALL_URL=${NEUTRON_ZIPBALL_URL:-$(zip_snapshot_location $NEUTRON_REPO $NEUTRON_BRANCH)}
@@ -90,9 +75,6 @@
     rm -rf "$EXTRACTED_NEUTRON"
 fi
 
-create_directory_for_kernels
-create_directory_for_images
-
 #
 # Configure Networking
 #
@@ -184,18 +166,16 @@
 SNAME_TEMPLATE="jeos_snapshot_for_devstack"
 SNAME_FIRST_BOOT="before_first_boot"
 
-function wait_for_VM_to_halt() {
+function wait_for_VM_to_halt {
     set +x
     echo "Waiting for the VM to halt.  Progress in-VM can be checked with vncviewer:"
     mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
-    domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true)
+    domid=$(get_domid "$GUEST_NAME")
     port=$(xenstore-read /local/domain/$domid/console/vnc-port)
     echo "vncviewer -via root@$mgmt_ip localhost:${port:2}"
-    while true
-    do
+    while true; do
         state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
-        if [ -n "$state" ]
-        then
+        if [ -n "$state" ]; then
             break
         else
             echo -n "."
@@ -338,7 +318,7 @@
 #
 xe vm-start vm="$GUEST_NAME"
 
-function ssh_no_check() {
+function ssh_no_check {
     ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@"
 }
 
@@ -361,6 +341,37 @@
     fi
 fi
 
+# Create an ssh-keypair, and set it up for dom0 user
+rm -f /root/dom0key /root/dom0key.pub
+ssh-keygen -f /root/dom0key -P "" -C "dom0"
+DOMID=$(get_domid "$GUEST_NAME")
+
+xenstore-write /local/domain/$DOMID/authorized_keys/$DOMZERO_USER "$(cat /root/dom0key.pub)"
+xenstore-chmod -u /local/domain/$DOMID/authorized_keys/$DOMZERO_USER r$DOMID
+
+function run_on_appliance {
+    ssh \
+        -i /root/dom0key \
+        -o UserKnownHostsFile=/dev/null \
+        -o StrictHostKeyChecking=no \
+        -o BatchMode=yes \
+        "$DOMZERO_USER@$OS_VM_MANAGEMENT_ADDRESS" "$@"
+}
+
+# Wait until we can log in to the appliance
+while ! run_on_appliance true; do
+    sleep 1
+done
+
+# Remove authenticated_keys updater cronjob
+echo "" | run_on_appliance crontab -
+
+# Generate a passwordless ssh key for domzero user
+echo "ssh-keygen -f /home/$DOMZERO_USER/.ssh/id_rsa -C $DOMZERO_USER@appliance -N \"\" -q" | run_on_appliance
+
+# Authenticate that user to dom0
+run_on_appliance cat /home/$DOMZERO_USER/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys
+
 # If we have copied our ssh credentials, use ssh to monitor while the installation runs
 WAIT_TILL_LAUNCH=${WAIT_TILL_LAUNCH:-1}
 COPYENV=${COPYENV:-1}
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index 05ac86c..440774e 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -18,6 +18,57 @@
 GUEST_PASSWORD="$1"
 XS_TOOLS_PATH="$2"
 STACK_USER="$3"
+DOMZERO_USER="$4"
+
+
+function setup_domzero_user {
+    local username
+
+    username="$1"
+
+    local key_updater_script
+    local sudoers_file
+    key_updater_script="/home/$username/update_authorized_keys.sh"
+    sudoers_file="/etc/sudoers.d/allow_$username"
+
+    # Create user
+    adduser --disabled-password --quiet "$username" --gecos "$username"
+
+    # Give passwordless sudo
+    cat > $sudoers_file << EOF
+    $username ALL = NOPASSWD: ALL
+EOF
+    chmod 0440 $sudoers_file
+
+    # A script to populate this user's authenticated_keys from xenstore
+    cat > $key_updater_script << EOF
+#!/bin/bash
+set -eux
+
+DOMID=\$(sudo xenstore-read domid)
+sudo xenstore-exists /local/domain/\$DOMID/authorized_keys/$username
+sudo xenstore-read /local/domain/\$DOMID/authorized_keys/$username > /home/$username/xenstore_value
+cat /home/$username/xenstore_value > /home/$username/.ssh/authorized_keys
+EOF
+
+    # Give the key updater to the user
+    chown $username:$username $key_updater_script
+    chmod 0700 $key_updater_script
+
+    # Setup the .ssh folder
+    mkdir -p /home/$username/.ssh
+    chown $username:$username /home/$username/.ssh
+    chmod 0700 /home/$username/.ssh
+    touch /home/$username/.ssh/authorized_keys
+    chown $username:$username /home/$username/.ssh/authorized_keys
+    chmod 0600 /home/$username/.ssh/authorized_keys
+
+    # Setup the key updater as a cron job
+    crontab -u $username - << EOF
+* * * * * $key_updater_script
+EOF
+
+}
 
 # Install basics
 apt-get update
@@ -48,6 +99,8 @@
 echo $STACK_USER:$GUEST_PASSWORD | chpasswd
 echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
 
+setup_domzero_user "$DOMZERO_USER"
+
 # Add an udev rule, so that new block devices could be written by stack user
 cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF
 KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660"
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
index 546ac99..eaab2fe 100755
--- a/tools/xen/prepare_guest_template.sh
+++ b/tools/xen/prepare_guest_template.sh
@@ -22,9 +22,19 @@
 # This directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
+# Source lower level functions
+. $TOP_DIR/../../functions
+
 # Include onexit commands
 . $TOP_DIR/scripts/on_exit.sh
 
+# xapi functions
+. $TOP_DIR/functions
+
+# Determine what system we are running on.
+# Might not be XenServer if we're using xenserver-core
+GetDistro
+
 # Source params - override xenrc params in your localrc to suite your taste
 source xenrc
 
@@ -76,7 +86,7 @@
 cat <<EOF >$STAGING_DIR/etc/rc.local
 #!/bin/sh -e
 bash /opt/stack/prepare_guest.sh \\
-    "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\
+    "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" "$DOMZERO_USER" \\
     > /opt/stack/prepare_guest.log 2>&1
 EOF
 
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 7b0d891..b9b65fd 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -42,8 +42,7 @@
 
 get_params()
 {
-    while getopts "hbn:r:l:t:" OPTION;
-    do
+    while getopts "hbn:r:l:t:" OPTION; do
         case $OPTION in
             h) usage
                 exit 1
@@ -63,8 +62,7 @@
                 ;;
         esac
     done
-    if [[ -z $BRIDGE ]]
-    then
+    if [[ -z $BRIDGE ]]; then
         BRIDGE=xenbr0
     fi
 
@@ -91,8 +89,7 @@
 find_network()
 {
     result=$(xe_min network-list bridge="$1")
-    if [ "$result" = "" ]
-    then
+    if [ "$result" = "" ]; then
         result=$(xe_min network-list name-label="$1")
     fi
     echo "$result"
@@ -121,8 +118,7 @@
 {
     local v="$1"
     IFS=,
-    for vif in $(xe_min vif-list vm-uuid="$v")
-    do
+    for vif in $(xe_min vif-list vm-uuid="$v"); do
         xe vif-destroy uuid="$vif"
     done
     unset IFS
diff --git a/tools/xen/scripts/on_exit.sh b/tools/xen/scripts/on_exit.sh
index a4db39c..2441e3d 100755
--- a/tools/xen/scripts/on_exit.sh
+++ b/tools/xen/scripts/on_exit.sh
@@ -7,8 +7,7 @@
 
 on_exit()
 {
-    for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0)
-    do
+    for i in $(seq $((${#on_exit_hooks[*]} - 1)) -1 0); do
         eval "${on_exit_hooks[$i]}"
     done
 }
@@ -17,8 +16,7 @@
 {
     local n=${#on_exit_hooks[*]}
     on_exit_hooks[$n]="$*"
-    if [[ $n -eq 0 ]]
-    then
+    if [[ $n -eq 0 ]]; then
         trap on_exit EXIT
     fi
 }
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 373d996..838f86a 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -227,16 +227,14 @@
 }
 
 [ "$1" = "run_tests" ] && {
-    for testname in $($0)
-    do
+    for testname in $($0); do
         echo "$testname"
         before_each_test
         (
             set -eux
             $testname
         )
-        if [ "$?" != "0" ]
-        then
+        if [ "$?" != "0" ]; then
             echo "FAIL"
             exit 1
         else
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index cd28234..278bb9b 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -35,7 +35,7 @@
 GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
 
 # Extracted variables for OpenStack VM network device numbers.
-# Make sure, they form a continous sequence starting from 0
+# Make sure they form a continuous sequence starting from 0
 MGT_DEV_NR=0
 VM_DEV_NR=1
 PUB_DEV_NR=2
@@ -91,4 +91,7 @@
 # Set the size to 0 to avoid creation of additional disk.
 XEN_XVDB_SIZE_GB=0
 
+restore_nounset=`set +o | grep nounset`
+set +u
 source ../../stackrc
+$restore_nounset
diff --git a/unstack.sh b/unstack.sh
index 92d0642..6351fe0 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -55,7 +55,6 @@
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/ldap
-source $TOP_DIR/lib/ironic
 
 # Extras Source
 # --------------
@@ -104,7 +103,7 @@
     stop_nova
 fi
 
-if is_service_enabled g-api g-reg; then
+if is_service_enabled glance; then
     stop_glance
 fi
 
@@ -118,12 +117,6 @@
     cleanup_swift
 fi
 
-# Ironic runs daemons
-if is_service_enabled ir-api ir-cond; then
-    stop_ironic
-    cleanup_ironic
-fi
-
 # Apache has the WSGI processes
 if is_service_enabled horizon; then
     stop_horizon