Merge "Add mysql devel as testonly alongside mysql-server"
diff --git a/.gitignore b/.gitignore
index b80b476..b0a65f5 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,6 +5,7 @@
 *.pem
 .localrc.auto
 .prereqs
+.tox
 .stackenv
 accrc
 docs/files
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index 0891d02..bdd9e78 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -28,20 +28,54 @@
 
 .. contents:: :local:
 
-Fedora/CentOS/RHEL
-~~~~~~~~~~~~~~~~~~
 
- * Ian Wienand <iwienand@redhat.com>
+Ceph
+~~~~
 
-Xen
-~~~
+* Sebastien Han <sebastien.han@enovance.com>
 
 Cinder
 ~~~~~~
 
+Fedora/CentOS/RHEL
+~~~~~~~~~~~~~~~~~~
+
+* Ian Wienand <iwienand@redhat.com>
+
 Neutron
 ~~~~~~~
 
-tempest
+OpenDaylight
+~~~~~~~~~~~~
+
+* Kyle Mestery <kmestery@cisco.com>
+
+Sahara
+~~~~~~
+
+* Sergey Lukjanov <slukjanov@mirantis.com>
+
+SUSE
+~~~~
+
+* Ralf Haferkamp <rhafer@suse.de>
+* Vincent Untz <vuntz@suse.com>
+
+Tempest
 ~~~~~~~
 
+Trove
+~~~~~
+
+* Nikhil Manchanda <SlickNik@gmail.com>
+* Michael Basnight <mbasnight@gmail.com>
+
+Xen
+~~~
+* Bob Ball <bob.ball@citrix.com>
+
+Zaqar (Marconi)
+~~~~~~~~~~~~~~~
+
+* Flavio Percoco <flaper87@gmail.com>
+* Malini Kamalambal <malini.kamalambal@rackspace.com>
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index d756685..a2ae275 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -71,10 +71,10 @@
 # ------
 
 # List the images available
-glance image-list
+openstack image list
 
 # Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index b360f1e..2f85d98 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -122,7 +122,7 @@
         STATUS_GLANCE="Skipped"
     else
         echo -e "\nTest Glance"
-        if glance $TENANT_ARG $ARGS image-list; then
+        if openstack $TENANT_ARG $ARGS image list; then
             STATUS_GLANCE="Succeeded"
         else
             STATUS_GLANCE="Failed"
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index cc518d9..4a0609a 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -132,7 +132,7 @@
         STATUS_GLANCE="Skipped"
     else
         echo -e "\nTest Glance"
-        if glance image-list; then
+        if openstack image list; then
             STATUS_GLANCE="Succeeded"
         else
             STATUS_GLANCE="Failed"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 7e90e5a..57f48e0 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -71,10 +71,10 @@
 # ------
 
 # List the images available
-glance image-list
+openstack image list
 
 # Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 6679670..5b3281b 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -134,7 +134,7 @@
 }
 
 function get_image_id {
-    local IMAGE_ID=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+    local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
     die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
     echo "$IMAGE_ID"
 }
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 1dff6a4..504fba1 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -68,10 +68,10 @@
 # ------
 
 # List the images available
-glance image-list
+openstack image list
 
 # Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+IMAGE=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index fc8731c..a9d9cc3 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -6,8 +6,7 @@
     WSGIProcessGroup keystone-public
     WSGIScriptAlias / %PUBLICWSGI%
     WSGIApplicationGroup %{GLOBAL}
-    ErrorLog /var/log/%APACHE_NAME%/keystone
-    LogLevel debug
+    ErrorLog /var/log/%APACHE_NAME%/keystone.log
     CustomLog /var/log/%APACHE_NAME%/access.log combined
 </VirtualHost>
 
@@ -16,8 +15,7 @@
     WSGIProcessGroup keystone-admin
     WSGIScriptAlias / %ADMINWSGI%
     WSGIApplicationGroup %{GLOBAL}
-    ErrorLog /var/log/%APACHE_NAME%/keystone
-    LogLevel debug
+    ErrorLog /var/log/%APACHE_NAME%/keystone.log
     CustomLog /var/log/%APACHE_NAME%/access.log combined
 </VirtualHost>
 
diff --git a/files/apts/marconi-server b/files/apts/zaqar-server
similarity index 100%
rename from files/apts/marconi-server
rename to files/apts/zaqar-server
diff --git a/files/rpms/marconi-server b/files/rpms/zaqar-server
similarity index 100%
rename from files/rpms/marconi-server
rename to files/rpms/zaqar-server
diff --git a/lib/baremetal b/lib/baremetal
index 79c499c..af90c06 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -232,6 +232,7 @@
         image create \
         $BM_DEPLOY_KERNEL \
         --public --disk-format=aki \
+        --container-format=aki \
         < $TOP_DIR/files/$BM_DEPLOY_KERNEL  | grep ' id ' | get_field 2)
     BM_DEPLOY_RAMDISK_ID=$(openstack \
         --os-token $token \
@@ -239,6 +240,7 @@
         image create \
         $BM_DEPLOY_RAMDISK \
         --public --disk-format=ari \
+        --container-format=ari \
         < $TOP_DIR/files/$BM_DEPLOY_RAMDISK  | grep ' id ' | get_field 2)
 }
 
@@ -287,6 +289,7 @@
         image create \
         $image_name-kernel \
         --public --disk-format=aki \
+        --container-format=aki \
         < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
     RAMDISK_ID=$(openstack \
         --os-token $token \
@@ -294,6 +297,7 @@
         image create \
         $image_name-initrd \
         --public --disk-format=ari \
+        --container-format=ari \
         < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
 }
 
diff --git a/lib/ceilometer b/lib/ceilometer
index 4030aca..340acb9 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -79,19 +79,19 @@
 
 create_ceilometer_accounts() {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Ceilometer
     if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
-        CEILOMETER_USER=$(get_or_create_user "ceilometer" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT)
-        get_or_add_user_role $ADMIN_ROLE $CEILOMETER_USER $SERVICE_TENANT
+        local ceilometer_user=$(get_or_create_user "ceilometer" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $ceilometer_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-            CEILOMETER_SERVICE=$(get_or_create_service "ceilometer" \
+            local ceilometer_service=$(get_or_create_service "ceilometer" \
                 "metering" "OpenStack Telemetry Service")
-            get_or_create_endpoint $CEILOMETER_SERVICE \
+            get_or_create_endpoint $ceilometer_service \
                 "$REGION_NAME" \
                 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
                 "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
diff --git a/lib/cinder b/lib/cinder
index c78715e..ce13b86 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -96,10 +96,10 @@
 # Source the enabled backends
 if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
     for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-        BE_TYPE=${be%%:*}
-        BE_NAME=${be##*:}
-        if [[ -r $CINDER_BACKENDS/${BE_TYPE} ]]; then
-            source $CINDER_BACKENDS/${BE_TYPE}
+        be_type=${be%%:*}
+        be_name=${be##*:}
+        if [[ -r $CINDER_BACKENDS/${be_type} ]]; then
+            source $CINDER_BACKENDS/${be_type}
         fi
     done
 fi
@@ -120,7 +120,7 @@
 function cleanup_cinder {
     # ensure the volume group is cleared up because fails might
     # leave dead volumes in the group
-    TARGETS=$(sudo tgtadm --op show --mode target)
+    local targets=$(sudo tgtadm --op show --mode target)
     if [ $? -ne 0 ]; then
         # If tgt driver isn't running this won't work obviously
         # So check the response and restart if need be
@@ -130,11 +130,11 @@
         else
             restart_service tgtd
         fi
-        TARGETS=$(sudo tgtadm --op show --mode target)
+        targets=$(sudo tgtadm --op show --mode target)
     fi
 
-    if [[ -n "$TARGETS" ]]; then
-        iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
+    if [[ -n "$targets" ]]; then
+        local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's/<target //' | sed 's/>//') )
         for i in "${iqn_list[@]}"; do
             echo removing iSCSI target: $i
             sudo tgt-admin --delete $i
@@ -148,11 +148,12 @@
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type cleanup_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                cleanup_cinder_backend_${BE_TYPE} ${BE_NAME}
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            if type cleanup_cinder_backend_${be_type} >/dev/null 2>&1; then
+                cleanup_cinder_backend_${be_type} ${be_name}
             fi
         done
     fi
@@ -161,7 +162,7 @@
 # configure_cinder_rootwrap() - configure Cinder's rootwrap
 function configure_cinder_rootwrap {
     # Set the paths of certain binaries
-    CINDER_ROOTWRAP=$(get_rootwrap_location cinder)
+    local cinder_rootwrap=$(get_rootwrap_location cinder)
 
     # Deploy new rootwrap filters files (owned by root).
     # Wipe any existing rootwrap.d files first
@@ -179,14 +180,14 @@
     sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf
     sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf
     # Specify rootwrap.conf as first parameter to rootwrap
-    ROOTWRAP_CSUDOER_CMD="$CINDER_ROOTWRAP $CINDER_CONF_DIR/rootwrap.conf *"
+    ROOTWRAP_CSUDOER_CMD="$cinder_rootwrap $CINDER_CONF_DIR/rootwrap.conf *"
 
     # Set up the rootwrap sudoers for cinder
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
+    local tempfile=`mktemp`
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$tempfile
+    chmod 0440 $tempfile
+    sudo chown root:root $tempfile
+    sudo mv $tempfile /etc/sudoers.d/cinder-rootwrap
 }
 
 # configure_cinder() - Set config files, create data dirs, etc
@@ -237,18 +238,19 @@
     iniset $CINDER_CONF DEFAULT enable_v1_api true
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
-        enabled_backends=""
-        default_name=""
+        local enabled_backends=""
+        local default_name=""
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type configure_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                configure_cinder_backend_${BE_TYPE} ${BE_NAME}
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            if type configure_cinder_backend_${be_type} >/dev/null 2>&1; then
+                configure_cinder_backend_${be_type} ${be_name}
             fi
-            if [[ -z "$default_name" ]]; then
-                default_name=$BE_NAME
+            if [[ -z "$default_type" ]]; then
+                default_name=$be_type
             fi
-            enabled_backends+=$BE_NAME,
+            enabled_backends+=$be_name,
         done
         iniset $CINDER_CONF DEFAULT enabled_backends ${enabled_backends%,*}
         if [[ -n "$default_name" ]]; then
@@ -316,28 +318,28 @@
 # Migrated from keystone_data.sh
 function create_cinder_accounts {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     # Cinder
     if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
 
-        CINDER_USER=$(get_or_create_user "cinder" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT)
-        get_or_add_user_role $ADMIN_ROLE $CINDER_USER $SERVICE_TENANT
+        local cinder_user=$(get_or_create_user "cinder" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $admin_role $cinder_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            CINDER_SERVICE=$(get_or_create_service "cinder" \
+            local cinder_service=$(get_or_create_service "cinder" \
                 "volume" "Cinder Volume Service")
-            get_or_create_endpoint $CINDER_SERVICE "$REGION_NAME" \
+            get_or_create_endpoint $cinder_service "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
 
-            CINDER_V2_SERVICE=$(get_or_create_service "cinderv2" \
+            local cinder_v2_service=$(get_or_create_service "cinderv2" \
                 "volumev2" "Cinder Volume Service V2")
-            get_or_create_endpoint $CINDER_V2_SERVICE "$REGION_NAME" \
+            get_or_create_endpoint $cinder_v2_service "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
@@ -354,6 +356,7 @@
 }
 
 # init_cinder() - Initialize database and volume group
+# Uses global ``NOVA_ENABLED_APIS``
 function init_cinder {
     # Force nova volumes off
     NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
@@ -367,11 +370,12 @@
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            if type init_cinder_backend_${BE_TYPE} >/dev/null 2>&1; then
-                init_cinder_backend_${BE_TYPE} ${BE_NAME}
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
+                init_cinder_backend_${be_type} ${be_name}
             fi
         done
     fi
@@ -450,6 +454,7 @@
 # stop_cinder() - Stop running processes
 function stop_cinder {
     # Kill the cinder screen windows
+    local serv
     for serv in c-api c-bak c-sch c-vol; do
         screen_stop $serv
     done
@@ -467,12 +472,13 @@
 function create_volume_types {
     # Create volume types
     if is_service_enabled c-api && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
+        local be be_name be_type
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            BE_TYPE=${be%%:*}
-            BE_NAME=${be##*:}
-            # openstack volume type create --property volume_backend_name="${BE_TYPE}" ${BE_NAME}
-            cinder type-create ${BE_NAME} && \
-                cinder type-key ${BE_NAME} set volume_backend_name="${BE_NAME}"
+            be_type=${be%%:*}
+            be_name=${be##*:}
+            # openstack volume type create --property volume_backend_name="${be_type}" ${be_name}
+            cinder type-create ${be_name} && \
+                cinder type-key ${be_name} set volume_backend_name="${be_name}"
         done
     fi
 }
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 324c323..8f8ab79 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -112,6 +112,7 @@
     local lv_prefix=$2
 
     # Clean out existing volumes
+    local lv
     for lv in $(sudo lvs --noheadings -o lv_name $vg 2>/dev/null); do
         # lv_prefix prefixes the LVs we want
         if [[ "${lv#$lv_prefix}" != "$lv" ]]; then
@@ -132,9 +133,9 @@
     # of the backing file
     if [[ -z "$(sudo lvs --noheadings -o lv_name $vg 2>/dev/null)" ]]; then
         # if the backing physical device is a loop device, it was probably setup by devstack
-        VG_DEV=$(sudo losetup -j $backing_file | awk -F':' '/backing-file/ { print $1}')
-        if [[ -n "$VG_DEV" ]] && [[ -e "$VG_DEV" ]]; then
-            sudo losetup -d $VG_DEV
+        local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/backing-file/ { print $1}')
+        if [[ -n "$vg_dev" ]] && [[ -e "$vg_dev" ]]; then
+            sudo losetup -d $vg_dev
             rm -f $backing_file
         fi
     fi
@@ -159,11 +160,11 @@
         if [ -z "$VOLUME_BACKING_DEVICE" ]; then
             # Only create if the file doesn't already exists
             [[ -f $backing_file ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $backing_file
-            DEV=`sudo losetup -f --show $backing_file`
+            local vg_dev=`sudo losetup -f --show $backing_file`
 
             # Only create if the loopback device doesn't contain $VOLUME_GROUP
             if ! sudo vgs $vg_name; then
-                sudo vgcreate $vg_name $DEV
+                sudo vgcreate $vg_name $vg_dev
             fi
         else
             sudo vgcreate $vg_name $VOLUME_BACKING_DEVICE
diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv
new file mode 100644
index 0000000..dbdb96c
--- /dev/null
+++ b/lib/cinder_backends/xiv
@@ -0,0 +1,84 @@
+# Copyright 2014 IBM Corp.
+# Copyright (c) 2014 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+# Authors:
+#   Alon Marx <alonma@il.ibm.com>
+
+# lib/cinder_backends/xiv
+# Configure the xiv_ds8k driver for xiv testing
+
+# Enable xiv_ds8k driver for xiv with:
+#
+#   CINDER_ENABLED_BACKENDS+=,xiv:<volume-type-name>
+#   XIV_DRIVER_VERSION=<version-string>
+#   SAN_IP=<storage-ip-or-hostname>
+#   SAN_LOGIN=<storage-admin-account>
+#   SAN_PASSWORD=<storage-admin-password>
+#   SAN_CLUSTERNAME=<cluster-name>
+#   CONNECTION_TYPE=<connection-type> iscsi|fc
+#   XIV_CHAP=<chap-type> disabled|enabled
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``cinder`` configurations
+
+# configure_cinder_backend_xiv - Configure Cinder for xiv backends
+
+# Save trace setting
+XIV_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+# Set up default directories
+
+
+# Entry Points
+# ------------
+
+# configure_cinder_backend_xiv - Set config files, create data dirs, etc
+function configure_cinder_backend_xiv {
+
+    local be_name=$1
+
+    python -c 'from xiv_ds8k_openstack.xiv_nova_proxy import XIVNovaProxy'
+    if [ $? -ne 0 ]; then
+        die $LINENO "XIV_DS8K driver is missing. Please install first"
+    fi
+
+    # For reference:
+    # XIV_DS8K_BACKEND='IBM-XIV_'${SAN_IP}'_'${SAN_CLUSTERNAME}'_'${CONNECTION_TYPE}
+    iniset $CINDER_CONF DEFAULT xiv_ds8k_driver_version $XIV_DRIVER_VERSION
+
+    iniset $CINDER_CONF $be_name san_ip $SAN_IP
+    iniset $CINDER_CONF $be_name san_login $SAN_LOGIN
+    iniset $CINDER_CONF $be_name san_password $SAN_PASSWORD
+    iniset $CINDER_CONF $be_name san_clustername $SAN_CLUSTERNAME
+    iniset $CINDER_CONF $be_name xiv_ds8k_connection_type $CONNECTION_TYPE
+    iniset $CINDER_CONF $be_name volume_backend_name $be_name
+    iniset $CINDER_CONF $be_name volume_driver 'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver'
+    iniset $CINDER_CONF $be_name xiv_ds8k_proxy 'xiv_ds8k_openstack.xiv_nova_proxy.XIVNovaProxy'
+    iniset $CINDER_CONF $be_name xiv_chap $XIV_CHAP
+}
+
+# Restore xtrace
+$XIV_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/config b/lib/config
index 67d788c..0baa4cc 100644
--- a/lib/config
+++ b/lib/config
@@ -110,6 +110,7 @@
 
     [[ -r $localfile ]] || return 0
 
+    local configfile group
     for group in $matchgroups; do
         for configfile in $(get_meta_section_files $localfile $group); do
             if [[ -d $(dirname $(eval "echo $configfile")) ]]; then
diff --git a/lib/glance b/lib/glance
index 78e5e88..1dea6cf 100644
--- a/lib/glance
+++ b/lib/glance
@@ -169,23 +169,23 @@
 function create_glance_accounts {
     if is_service_enabled g-api; then
 
-        GLANCE_USER=$(get_or_create_user "glance" \
+        local glance_user=$(get_or_create_user "glance" \
             "$SERVICE_PASSWORD" $SERVICE_TENANT_NAME)
-        get_or_add_user_role service $GLANCE_USER $SERVICE_TENANT_NAME
+        get_or_add_user_role service $glance_user $SERVICE_TENANT_NAME
 
         # required for swift access
         if is_service_enabled s-proxy; then
 
-            GLANCE_SWIFT_USER=$(get_or_create_user "glance-swift" \
+            local glance_swift_user=$(get_or_create_user "glance-swift" \
                 "$SERVICE_PASSWORD" $SERVICE_TENANT_NAME "glance-swift@example.com")
-            get_or_add_user_role "ResellerAdmin" $GLANCE_SWIFT_USER $SERVICE_TENANT_NAME
+            get_or_add_user_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            GLANCE_SERVICE=$(get_or_create_service "glance" \
+            local glance_service=$(get_or_create_service "glance" \
                 "image" "Glance Image Service")
-            get_or_create_endpoint $GLANCE_SERVICE \
+            get_or_create_endpoint $glance_service \
                 "$REGION_NAME" \
                 "http://$GLANCE_HOSTPORT" \
                 "http://$GLANCE_HOSTPORT" \
diff --git a/lib/horizon b/lib/horizon
index a65b243..614a0c8 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -50,7 +50,7 @@
         sed -e "/^$option/d" -i $local_settings
         echo -e "\n$option=$value" >> $file
     elif grep -q "^$section" $file; then
-        line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+        local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
         if [ -n "$line" ]; then
             sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
         else
@@ -89,7 +89,7 @@
 # init_horizon() - Initialize databases, etc.
 function init_horizon {
     # ``local_settings.py`` is used to override horizon default settings.
-    local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
 
     if is_service_enabled neutron; then
@@ -121,9 +121,9 @@
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
     # Apache 2.4 uses mod_authz_host for access control now (instead of "Allow")
-    HORIZON_REQUIRE=''
+    local horizon_require=''
     if check_apache_version "2.4" ; then
-        HORIZON_REQUIRE='Require all granted'
+        horizon_require='Require all granted'
     fi
 
     local horizon_conf=$(apache_site_config_for horizon)
@@ -135,7 +135,7 @@
         s,%HORIZON_DIR%,$HORIZON_DIR,g;
         s,%APACHE_NAME%,$APACHE_NAME,g;
         s,%DEST%,$DEST,g;
-        s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g;
+        s,%HORIZON_REQUIRE%,$horizon_require,g;
     \" $FILES/apache-horizon.template >$horizon_conf"
 
     if is_ubuntu; then
diff --git a/lib/ironic b/lib/ironic
index c35f4dc..469f3a3 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -439,6 +439,10 @@
 
     # Remove the port needed only for workaround.
     neutron port-delete $port_id
+
+    # Finally, share the fixed tenant network across all tenants.  This allows the host
+    # to serve TFTP to a single network namespace via the tap device created above.
+    neutron net-update $ironic_net_id --shared true
 }
 
 function create_bridge_and_vms {
diff --git a/lib/keystone b/lib/keystone
index 547646a..c6e17ca 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -328,11 +328,11 @@
 function create_keystone_accounts {
 
     # admin
-    ADMIN_TENANT=$(get_or_create_project "admin")
-    ADMIN_USER=$(get_or_create_user "admin" \
-        "$ADMIN_PASSWORD" "$ADMIN_TENANT")
-    ADMIN_ROLE=$(get_or_create_role "admin")
-    get_or_add_user_role $ADMIN_ROLE $ADMIN_USER $ADMIN_TENANT
+    local admin_tenant=$(get_or_create_project "admin")
+    local admin_user=$(get_or_create_user "admin" \
+        "$ADMIN_PASSWORD" "$admin_tenant")
+    local admin_role=$(get_or_create_role "admin")
+    get_or_add_user_role $admin_role $admin_user $admin_tenant
 
     # Create service project/role
     get_or_create_project "$SERVICE_TENANT_NAME"
@@ -347,25 +347,25 @@
     get_or_create_role ResellerAdmin
 
     # The Member role is used by Horizon and Swift so we need to keep it:
-    MEMBER_ROLE=$(get_or_create_role "Member")
+    local member_role=$(get_or_create_role "Member")
 
     # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
 
-    ANOTHER_ROLE=$(get_or_create_role "anotherrole")
+    local another_role=$(get_or_create_role "anotherrole")
 
     # invisible tenant - admin can't see this one
-    INVIS_TENANT=$(get_or_create_project "invisible_to_admin")
+    local invis_tenant=$(get_or_create_project "invisible_to_admin")
 
     # demo
-    DEMO_TENANT=$(get_or_create_project "demo")
-    DEMO_USER=$(get_or_create_user "demo" \
-        "$ADMIN_PASSWORD" "$DEMO_TENANT" "demo@example.com")
+    local demo_tenant=$(get_or_create_project "demo")
+    local demo_user=$(get_or_create_user "demo" \
+        "$ADMIN_PASSWORD" "$demo_tenant" "demo@example.com")
 
-    get_or_add_user_role $MEMBER_ROLE $DEMO_USER $DEMO_TENANT
-    get_or_add_user_role $ADMIN_ROLE $ADMIN_USER $DEMO_TENANT
-    get_or_add_user_role $ANOTHER_ROLE $DEMO_USER $DEMO_TENANT
-    get_or_add_user_role $MEMBER_ROLE $DEMO_USER $INVIS_TENANT
+    get_or_add_user_role $member_role $demo_user $demo_tenant
+    get_or_add_user_role $admin_role $admin_user $demo_tenant
+    get_or_add_user_role $another_role $demo_user $demo_tenant
+    get_or_add_user_role $member_role $demo_user $invis_tenant
 
     # Keystone
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
@@ -467,7 +467,7 @@
 
     if [ "$KEYSTONE_USE_MOD_WSGI" == "True" ]; then
         restart_apache_server
-        screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone"
+        screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone.log"
     else
         # Start Keystone in a screen window
         screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
diff --git a/lib/neutron b/lib/neutron
index f703bec..a00664e 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -85,6 +85,20 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
+# Agent binaries. Note: binary paths for other agents are set in per-service
+# scripts in lib/neutron_plugins/services/
+AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
+AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
+AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
+
+# Agent config files. Note: plugin-specific Q_PLUGIN_CONF_FILE is set and
+# loaded from per-plugin scripts in lib/neutron_plugins/
+Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
+Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
+Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
+Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini
+Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
+
 # Default name for Neutron database
 Q_DB_NAME=${Q_DB_NAME:-neutron}
 # Default Neutron Plugin
@@ -290,6 +304,51 @@
 # Functions
 # ---------
 
+function _determine_config_server {
+    local cfg_file
+    local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
+        opts+=" --config-file /$cfg_file"
+    done
+    echo "$opts"
+}
+
+function _determine_config_vpn {
+    local cfg_file
+    local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE"
+    if is_service_enabled q-fwaas; then
+        opts+=" --config-file $Q_FWAAS_CONF_FILE"
+    fi
+    for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do
+        opts+=" --config-file $cfg_file"
+    done
+    echo "$opts"
+
+}
+
+function _determine_config_l3 {
+    local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+    if is_service_enabled q-fwaas; then
+        opts+=" --config-file $Q_FWAAS_CONF_FILE"
+    fi
+    echo "$opts"
+}
+
+# For services and agents that require it, dynamically construct a list of
+# --config-file arguments that are passed to the binary.
+function determine_config_files {
+    local opts=""
+    case "$1" in
+        "neutron-server") opts="$(_determine_config_server)" ;;
+        "neutron-vpn-agent") opts="$(_determine_config_vpn)" ;;
+        "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
+    esac
+    if [ -z "$opts" ] ; then
+        die $LINENO "Could not determine config files for $1."
+    fi
+    echo "$opts"
+}
+
 # Test if any Neutron services are enabled
 # is_neutron_enabled
 function is_neutron_enabled {
@@ -364,7 +423,7 @@
     iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER"
     iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER"
     if is_service_enabled q-meta; then
-        iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
+        iniset $NOVA_CONF neutron service_metadata_proxy "True"
     fi
 
     iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
@@ -530,14 +589,9 @@
 
 # Start running processes, including screen
 function start_neutron_service_and_check {
-    # build config-file options
-    local cfg_file
-    local CFG_FILE_OPTIONS="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
-        CFG_FILE_OPTIONS+=" --config-file /$cfg_file"
-    done
+    local cfg_file_options="$(determine_config_files neutron-server)"
     # Start the Neutron service
-    screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $CFG_FILE_OPTIONS"
+    screen_it q-svc "cd $NEUTRON_DIR && python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
     echo "Waiting for Neutron to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
         die $LINENO "Neutron did not start"
@@ -550,8 +604,6 @@
     screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
     screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
 
-    L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
-
     if is_provider_network; then
         sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
         sudo ip link set $OVS_PHYSICAL_BRIDGE up
@@ -559,14 +611,10 @@
         sudo ip link set $PUBLIC_INTERFACE up
     fi
 
-    if is_service_enabled q-fwaas; then
-        L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE"
-        VPN_CONF_FILES="$VPN_CONF_FILES --config-file $Q_FWAAS_CONF_FILE"
-    fi
     if is_service_enabled q-vpn; then
-        screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $VPN_CONF_FILES"
+        screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)"
     else
-        screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES"
+        screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
     fi
 
     screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
@@ -703,8 +751,6 @@
 }
 
 function _configure_neutron_dhcp_agent {
-    AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
-    Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
 
     cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
 
@@ -724,20 +770,8 @@
     # for l3-agent, only use per tenant router if we have namespaces
     Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
 
-    AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
-    Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
-
-    if is_service_enabled q-fwaas; then
-        Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
-    fi
-
     if is_service_enabled q-vpn; then
-        Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini
         cp $NEUTRON_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
-        VPN_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE"
-        for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do
-            VPN_CONF_FILES+=" --config-file $cfg_file"
-        done
     fi
 
     cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
@@ -753,9 +787,6 @@
 }
 
 function _configure_neutron_metadata_agent {
-    AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
-    Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
-
     cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
 
     iniset $Q_META_CONF_FILE DEFAULT verbose True
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 3fc37de..835f645 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -67,7 +67,7 @@
 
     if [ "$VIRT_DRIVER" == 'xenserver' ]; then
         # Make a copy of our config for domU
-        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domu"
+        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
 
         # Deal with Dom0's L2 Agent:
         Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
diff --git a/lib/nova b/lib/nova
index 6b1afd9..892aace 100644
--- a/lib/nova
+++ b/lib/nova
@@ -59,10 +59,6 @@
 # Set the paths of certain binaries
 NOVA_ROOTWRAP=$(get_rootwrap_location nova)
 
-# Allow rate limiting to be turned off for testing, like for Tempest
-# NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
-API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}
-
 # Option to enable/disable config drive
 # NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"}
@@ -461,9 +457,6 @@
     if [ "$SYSLOG" != "False" ]; then
         iniset $NOVA_CONF DEFAULT use_syslog "True"
     fi
-    if [ "$API_RATE_LIMIT" != "True" ]; then
-        iniset $NOVA_CONF DEFAULT api_rate_limit "False"
-    fi
     if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then
         iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
     fi
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 6fb5c38..258e1a4 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -37,7 +37,7 @@
     # and HP images used in the gate; rackspace has firewalld but hp
     # cloud doesn't.  RHEL6 doesn't have firewalld either.  So we
     # don't care if it fails.
-    if is_fedora; then
+    if is_fedora && is_package_installed firewalld; then
         sudo service firewalld restart || true
     fi
 }
diff --git a/lib/sahara b/lib/sahara
index 70feacd..37876dc 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -57,18 +57,18 @@
 # service     sahara    admin
 function create_sahara_accounts {
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SAHARA_USER=$(get_or_create_user "sahara" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT)
-    get_or_add_user_role $ADMIN_ROLE $SAHARA_USER $SERVICE_TENANT
+    local sahara_user=$(get_or_create_user "sahara" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $sahara_user $service_tenant
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        SAHARA_SERVICE=$(get_or_create_service "sahara" \
+        local sahara_service=$(get_or_create_service "sahara" \
             "data_processing" "Sahara Data Processing")
-        get_or_create_endpoint $SAHARA_SERVICE \
+        get_or_create_endpoint $sahara_service \
             "$REGION_NAME" \
             "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
             "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
@@ -111,6 +111,14 @@
     iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
     iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
 
+    # Set configuration to send notifications
+
+    if is_service_enabled ceilometer; then
+        iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true"
+        iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging"
+        iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT
+    fi
+
     iniset $SAHARA_CONF_FILE DEFAULT verbose True
     iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
diff --git a/lib/stackforge b/lib/stackforge
index b744318..2d80dad 100644
--- a/lib/stackforge
+++ b/lib/stackforge
@@ -29,6 +29,7 @@
 # --------
 WSME_DIR=$DEST/wsme
 PECAN_DIR=$DEST/pecan
+SQLALCHEMY_MIGRATE_DIR=$DEST/sqlalchemy-migrate
 
 # Entry Points
 # ------------
@@ -40,6 +41,9 @@
 
     git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH
     setup_package $PECAN_DIR
+
+    git_clone $SQLALCHEMY_MIGRATE_REPO $SQLALCHEMY_MIGRATE_DIR $SQLALCHEMY_MIGRATE_BRANCH
+    setup_package $SQLALCHEMY_MIGRATE_DIR
 }
 
 # Restore xtrace
diff --git a/lib/swift b/lib/swift
index d8e8f23..6b96348 100644
--- a/lib/swift
+++ b/lib/swift
@@ -154,9 +154,10 @@
 function _cleanup_swift_apache_wsgi {
     sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi
     disable_apache_site proxy-server
+    local node_number type
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
         for type in object container account; do
-            site_name=${type}-server-${node_number}
+            local site_name=${type}-server-${node_number}
             disable_apache_site ${site_name}
             sudo rm -f $(apache_site_config_for ${site_name})
         done
@@ -186,10 +187,11 @@
     " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi
 
     # copy apache vhost file and set name and port
+    local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)]
-        container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)]
-        account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)]
+        local object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)]
+        local container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)]
+        local account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)]
 
         sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
         sudo sed -e "
@@ -249,7 +251,7 @@
     local server_type=$4
 
     log_facility=$[ node_id - 1 ]
-    node_path=${SWIFT_DATA_DIR}/${node_number}
+    local node_path=${SWIFT_DATA_DIR}/${node_number}
 
     iniuncomment ${swift_node_config} DEFAULT user
     iniset ${swift_node_config} DEFAULT user ${STACK_USER}
@@ -420,8 +422,9 @@
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE}
 
+    local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
+        local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
         generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)] object
         iniset ${swift_node_config} filter:recon recon_cache_path  ${SWIFT_DATA_DIR}/cache
@@ -464,7 +467,7 @@
         iniset ${testfile} func_test auth_prefix /v2.0/
     fi
 
-    swift_log_dir=${SWIFT_DATA_DIR}/logs
+    local swift_log_dir=${SWIFT_DATA_DIR}/logs
     rm -rf ${swift_log_dir}
     mkdir -p ${swift_log_dir}/hourly
     sudo chown -R ${STACK_USER}:adm ${swift_log_dir}
@@ -488,9 +491,9 @@
     # First do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
-    USER_GROUP=$(id -g ${STACK_USER})
+    local user_group=$(id -g ${STACK_USER})
     sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
-    sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR}
+    sudo chown -R ${STACK_USER}:${user_group} ${SWIFT_DATA_DIR}
 
     # Create a loopback disk and format it to XFS.
     if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
@@ -518,15 +521,16 @@
 
     # Create a link to the above mount and
     # create all of the directories needed to emulate a few different servers
+    local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
         sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
-        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
-        node=${SWIFT_DATA_DIR}/${node_number}/node
-        node_device=${node}/sdb1
+        local drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        local node=${SWIFT_DATA_DIR}/${node_number}/node
+        local node_device=${node}/sdb1
         [[ -d $node ]] && continue
         [[ -d $drive ]] && continue
-        sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive
-        sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device
+        sudo install -o ${STACK_USER} -g $user_group -d $drive
+        sudo install -o ${STACK_USER} -g $user_group -d $node_device
         sudo chown -R ${STACK_USER}: ${node}
     done
 }
@@ -544,49 +548,49 @@
 
 function create_swift_accounts {
     # Defines specific passwords used by tools/create_userrc.sh
-    SWIFTUSERTEST1_PASSWORD=testing
-    SWIFTUSERTEST2_PASSWORD=testing2
-    SWIFTUSERTEST3_PASSWORD=testing3
+    local swiftusertest1_password=testing
+    local swiftusertest2_password=testing2
+    local swiftusertest3_password=testing3
 
     KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local admin_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
-    SWIFT_USER=$(get_or_create_user "swift" \
-        "$SERVICE_PASSWORD" $SERVICE_TENANT)
-    get_or_add_user_role $ADMIN_ROLE $SWIFT_USER $SERVICE_TENANT
+    local swift_user=$(get_or_create_user "swift" \
+        "$SERVICE_PASSWORD" $service_tenant)
+    get_or_add_user_role $admin_role $swift_user $service_tenant
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        SWIFT_SERVICE=$(get_or_create_service "swift" \
+        local swift_service=$(get_or_create_service "swift" \
             "object-store" "Swift Service")
-        get_or_create_endpoint $SWIFT_SERVICE \
+        get_or_create_endpoint $swift_service \
             "$REGION_NAME" \
             "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
             "http://$SERVICE_HOST:8080" \
             "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
     fi
 
-    SWIFT_TENANT_TEST1=$(get_or_create_project swifttenanttest1)
-    die_if_not_set $LINENO SWIFT_TENANT_TEST1 "Failure creating SWIFT_TENANT_TEST1"
-    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $SWIFTUSERTEST1_PASSWORD \
-        "$SWIFT_TENANT_TEST1" "test@example.com")
+    local swift_tenant_test1=$(get_or_create_project swifttenanttest1)
+    die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
+    SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
+        "$swift_tenant_test1" "test@example.com")
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
-    get_or_add_user_role $ADMIN_ROLE $SWIFT_USER_TEST1 $SWIFT_TENANT_TEST1
+    get_or_add_user_role $admin_role $SWIFT_USER_TEST1 $swift_tenant_test1
 
-    SWIFT_USER_TEST3=$(get_or_create_user swiftusertest3 $SWIFTUSERTEST3_PASSWORD \
-        "$SWIFT_TENANT_TEST1" "test3@example.com")
-    die_if_not_set $LINENO SWIFT_USER_TEST3 "Failure creating SWIFT_USER_TEST3"
-    get_or_add_user_role $ANOTHER_ROLE $SWIFT_USER_TEST3 $SWIFT_TENANT_TEST1
+    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+        "$swift_tenant_test1" "test3@example.com")
+    die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
+    get_or_add_user_role $ANOTHER_ROLE $swift_user_test3 $swift_tenant_test1
 
-    SWIFT_TENANT_TEST2=$(get_or_create_project swifttenanttest2)
-    die_if_not_set $LINENO SWIFT_TENANT_TEST2 "Failure creating SWIFT_TENANT_TEST2"
+    local swift_tenant_test2=$(get_or_create_project swifttenanttest2)
+    die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
 
-    SWIFT_USER_TEST2=$(get_or_create_user swiftusertest2 $SWIFTUSERTEST2_PASSWORD \
-        "$SWIFT_TENANT_TEST2" "test2@example.com")
-    die_if_not_set $LINENO SWIFT_USER_TEST2 "Failure creating SWIFT_USER_TEST2"
-    get_or_add_user_role $ADMIN_ROLE $SWIFT_USER_TEST2 $SWIFT_TENANT_TEST2
+    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+        "$swift_tenant_test2" "test2@example.com")
+    die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
+    get_or_add_user_role $admin_role $swift_user_test2 $swift_tenant_test2
 }
 
 # init_swift() - Initialize rings
@@ -670,6 +674,7 @@
     # service so we can run it in foreground in screen.  ``swift-init ...
     # {stop|restart}`` exits with '1' if no servers are running, ignore it just
     # in case
+    local todo type
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
     if [[ ${SWIFT_REPLICAS} == 1 ]]; then
         todo="object container account"
@@ -691,6 +696,7 @@
 
 # stop_swift() - Stop running processes (non-screen)
 function stop_swift {
+    local type
 
     if [ "$SWIFT_USE_MOD_WSGI" == "True" ]; then
         swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
diff --git a/lib/tempest b/lib/tempest
index 681da1e..2e8aa3e 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -112,6 +112,8 @@
             image_uuid_alt="$IMAGE_UUID"
         fi
         images+=($IMAGE_UUID)
+    # TODO(stevemar): update this command to use openstackclient's `openstack image list`
+    # when it supports listing by status.
     done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
 
     case "${#images[*]}" in
@@ -384,6 +386,7 @@
         iniset $TEMPEST_CONFIG compute-feature-enabled resize False
         iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
         iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
+        iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
     fi
 
     # service_available
diff --git a/lib/tls b/lib/tls
index e58e513..061c1ca 100644
--- a/lib/tls
+++ b/lib/tls
@@ -84,6 +84,7 @@
         return 0
     fi
 
+    local i
     for i in certs crl newcerts private; do
         mkdir -p $ca_dir/$i
     done
@@ -234,31 +235,34 @@
     local common_name=$3
     local alt_names=$4
 
-    # Generate a signing request
-    $OPENSSL req \
-        -sha1 \
-        -newkey rsa \
-        -nodes \
-        -keyout $ca_dir/private/$cert_name.key \
-        -out $ca_dir/$cert_name.csr \
-        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
+    # Only generate the certificate if it doesn't exist yet on the disk
+    if [ ! -r "$ca_dir/$cert_name.crt" ]; then
+        # Generate a signing request
+        $OPENSSL req \
+            -sha1 \
+            -newkey rsa \
+            -nodes \
+            -keyout $ca_dir/private/$cert_name.key \
+            -out $ca_dir/$cert_name.csr \
+            -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}"
 
-    if [[ -z "$alt_names" ]]; then
-        alt_names="DNS:${common_name}"
-    else
-        alt_names="DNS:${common_name},${alt_names}"
+        if [[ -z "$alt_names" ]]; then
+            alt_names="DNS:${common_name}"
+        else
+            alt_names="DNS:${common_name},${alt_names}"
+        fi
+
+        # Sign the request valid for 1 year
+        SUBJECT_ALT_NAME="$alt_names" \
+        $OPENSSL ca -config $ca_dir/signing.conf \
+            -extensions req_extensions \
+            -days 365 \
+            -notext \
+            -in $ca_dir/$cert_name.csr \
+            -out $ca_dir/$cert_name.crt \
+            -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
+            -batch
     fi
-
-    # Sign the request valid for 1 year
-    SUBJECT_ALT_NAME="$alt_names" \
-    $OPENSSL ca -config $ca_dir/signing.conf \
-        -extensions req_extensions \
-        -days 365 \
-        -notext \
-        -in $ca_dir/$cert_name.csr \
-        -out $ca_dir/$cert_name.crt \
-        -subj "/O=${ORG_NAME}/OU=${ORG_UNIT_NAME} Servers/CN=${common_name}" \
-        -batch
 }
 
 
@@ -273,23 +277,25 @@
     create_CA_config $ca_dir 'Intermediate CA'
     create_signing_config $ca_dir
 
-    # Create a signing certificate request
-    $OPENSSL req -config $ca_dir/ca.conf \
-        -sha1 \
-        -newkey rsa \
-        -nodes \
-        -keyout $ca_dir/private/cacert.key \
-        -out $ca_dir/cacert.csr \
-        -outform PEM
+    if [ ! -r "$ca_dir/cacert.pem" ]; then
+        # Create a signing certificate request
+        $OPENSSL req -config $ca_dir/ca.conf \
+            -sha1 \
+            -newkey rsa \
+            -nodes \
+            -keyout $ca_dir/private/cacert.key \
+            -out $ca_dir/cacert.csr \
+            -outform PEM
 
-    # Sign the intermediate request valid for 1 year
-    $OPENSSL ca -config $signing_ca_dir/ca.conf \
-        -extensions ca_extensions \
-        -days 365 \
-        -notext \
-        -in $ca_dir/cacert.csr \
-        -out $ca_dir/cacert.pem \
-        -batch
+        # Sign the intermediate request valid for 1 year
+        $OPENSSL ca -config $signing_ca_dir/ca.conf \
+            -extensions ca_extensions \
+            -days 365 \
+            -notext \
+            -in $ca_dir/cacert.csr \
+            -out $ca_dir/cacert.pem \
+            -batch
+    fi
 }
 
 # Make a root CA to sign other CAs
diff --git a/lib/trove b/lib/trove
index 6877d0f..aa9442b 100644
--- a/lib/trove
+++ b/lib/trove
@@ -76,21 +76,20 @@
 # service              trove     admin        # if enabled
 
 function create_trove_accounts {
-    # Trove
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    SERVICE_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+    local service_tenant=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    local service_role=$(openstack role list | awk "/ admin / { print \$2 }")
 
     if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
 
-        TROVE_USER=$(get_or_create_user "trove" \
-            "$SERVICE_PASSWORD" $SERVICE_TENANT)
-        get_or_add_user_role $SERVICE_ROLE $TROVE_USER $SERVICE_TENANT
+        local trove_user=$(get_or_create_user "trove" \
+            "$SERVICE_PASSWORD" $service_tenant)
+        get_or_add_user_role $service_role $trove_user $service_tenant
 
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-            TROVE_SERVICE=$(get_or_create_service "trove" \
+            local trove_service=$(get_or_create_service "trove" \
                 "database" "Trove Service")
-            get_or_create_endpoint $TROVE_SERVICE \
+            get_or_create_endpoint $trove_service \
                 "$REGION_NAME" \
                 "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
                 "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
@@ -211,7 +210,7 @@
     # The image is uploaded by stack.sh -- see $IMAGE_URLS handling
     GUEST_IMAGE_NAME=$(basename "$TROVE_GUEST_IMAGE_URL")
     GUEST_IMAGE_NAME=${GUEST_IMAGE_NAME%.*}
-    TROVE_GUEST_IMAGE_ID=$(glance --os-auth-token $TOKEN --os-image-url http://$GLANCE_HOSTPORT image-list | grep "${GUEST_IMAGE_NAME}" | get_field 1)
+    TROVE_GUEST_IMAGE_ID=$(openstack --os-token $TOKEN --os-url http://$GLANCE_HOSTPORT image list | grep "${GUEST_IMAGE_NAME}" | get_field 1)
     if [ -z "$TROVE_GUEST_IMAGE_ID" ]; then
         # If no glance id is found, skip remaining setup
         echo "Datastore ${TROVE_DATASTORE_TYPE} will not be created: guest image ${GUEST_IMAGE_NAME} not found."
@@ -237,6 +236,7 @@
 # stop_trove() - Stop running processes
 function stop_trove {
     # Kill the trove screen windows
+    local serv
     for serv in tr-api tr-tmgr tr-cond; do
         screen_stop $serv
     done
diff --git a/stack.sh b/stack.sh
index 71c661d..6c4bde7 100755
--- a/stack.sh
+++ b/stack.sh
@@ -37,6 +37,56 @@
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
+
+# Sanity Checks
+# -------------
+
+# Clean up last environment var cache
+if [[ -r $TOP_DIR/.stackenv ]]; then
+    rm $TOP_DIR/.stackenv
+fi
+
+# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
+# templates and other useful files in the ``files`` subdirectory
+FILES=$TOP_DIR/files
+if [ ! -d $FILES ]; then
+    die $LINENO "missing devstack/files"
+fi
+
+# ``stack.sh`` keeps function libraries here
+# Make sure ``$TOP_DIR/lib`` directory is present
+if [ ! -d $TOP_DIR/lib ]; then
+    die $LINENO "missing devstack/lib"
+fi
+
+# Check if run as root
+# OpenStack is designed to be run as a non-root user; Horizon will fail to run
+# as **root** since Apache will not serve content from the **root** user.
+# ``stack.sh`` must not be run as **root**.  It aborts and suggests one course of
+# action to create a suitable user account.
+
+if [[ $EUID -eq 0 ]]; then
+    echo "You are running this script as root."
+    echo "Cut it out."
+    echo "Really."
+    echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:"
+    echo "$TOP_DIR/tools/create-stack-user.sh"
+    exit 1
+fi
+
+# Check to see if we are already running DevStack
+# Note that this may fail if USE_SCREEN=False
+if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
+    echo "You are already running a stack.sh session."
+    echo "To rejoin this session type 'screen -x stack'."
+    echo "To destroy this session, type './unstack.sh'."
+    exit 1
+fi
+
+
+# Prepare the environment
+# -----------------------
+
 # Import common functions
 source $TOP_DIR/functions
 
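Among the relocated sanity checks is the test for an already-running stack.sh
screen session.  A short sketch of how that detection works, assuming
``screen`` is installed; ``SCREEN_NAME`` defaults to ``stack`` in ``stackrc``:

    # ``screen -ls`` lists sessions as "<pid>.<name>" (e.g. "12345.stack"),
    # so the regex matches any live session using the configured name.
    SCREEN_NAME=${SCREEN_NAME:-stack}
    if screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
        echo "A '$SCREEN_NAME' screen session is already running."
    fi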
@@ -48,9 +98,18 @@
 # and ``DISTRO``
 GetDistro
 
+# Warn users who aren't on an explicitly supported distro, but allow them to
+# override the check and attempt installation with ``FORCE=yes ./stack``
+if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6|rhel7) ]]; then
+    echo "WARNING: this script has not been tested on $DISTRO"
+    if [[ "$FORCE" != "yes" ]]; then
+        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
+    fi
+fi
+
 
 # Global Settings
-# ===============
+# ---------------
 
 # Check for a ``localrc`` section embedded in ``local.conf`` and extract if
 # ``localrc`` does not already exist
@@ -106,49 +165,11 @@
 # Make sure the proxy config is visible to sub-processes
 export_proxy_variables
 
-# Destination path for installation ``DEST``
-DEST=${DEST:-/opt/stack}
-
-
-# Sanity Check
-# ------------
-
-# Clean up last environment var cache
-if [[ -r $TOP_DIR/.stackenv ]]; then
-    rm $TOP_DIR/.stackenv
-fi
-
-# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
-# templates and other useful files in the ``files`` subdirectory
-FILES=$TOP_DIR/files
-if [ ! -d $FILES ]; then
-    die $LINENO "missing devstack/files"
-fi
-
-# ``stack.sh`` keeps function libraries here
-# Make sure ``$TOP_DIR/lib`` directory is present
-if [ ! -d $TOP_DIR/lib ]; then
-    die $LINENO "missing devstack/lib"
-fi
-
-# Import common services (database, message queue) configuration
-source $TOP_DIR/lib/database
-source $TOP_DIR/lib/rpc_backend
-
 # Remove services which were negated in ENABLED_SERVICES
 # using the "-" prefix (e.g., "-rabbit") instead of
 # calling disable_service().
 disable_negated_services
 
-# Warn users who aren't on an explicitly supported distro, but allow them to
-# override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6|rhel7) ]]; then
-    echo "WARNING: this script has not been tested on $DISTRO"
-    if [[ "$FORCE" != "yes" ]]; then
-        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
-    fi
-fi
-
 # Look for obsolete stuff
 if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then
     echo "FATAL: 'swift' is not supported as a service name"
@@ -157,38 +178,11 @@
     exit 1
 fi
 
-# Make sure we only have one rpc backend enabled,
-# and the specified rpc backend is available on your platform.
-check_rpc_backend
-
-# Check to see if we are already running DevStack
-# Note that this may fail if USE_SCREEN=False
-if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
-    echo "You are already running a stack.sh session."
-    echo "To rejoin this session type 'screen -x stack'."
-    echo "To destroy this session, type './unstack.sh'."
-    exit 1
-fi
-
 # Set up logging level
 VERBOSE=$(trueorfalse True $VERBOSE)
 
-# root Access
-# -----------
-
-# OpenStack is designed to be run as a non-root user; Horizon will fail to run
-# as **root** since Apache will not serve content from **root** user).
-# ``stack.sh`` must not be run as **root**.  It aborts and suggests one course of
-# action to create a suitable user account.
-
-if [[ $EUID -eq 0 ]]; then
-    echo "You are running this script as root."
-    echo "Cut it out."
-    echo "Really."
-    echo "If you need an account to run DevStack, do this (as root, heh) to create $STACK_USER:"
-    echo "$TOP_DIR/tools/create-stack-user.sh"
-    exit 1
-fi
+# Configure sudo
+# --------------
 
 # We're not **root**, make sure ``sudo`` is available
 is_package_installed sudo || install_package sudo
@@ -208,8 +202,9 @@
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
 
-# Additional repos
-# ----------------
+
+# Configure Distro Repositories
+# -----------------------------
 
 # For debian/ubuntu make apt attempt to retry network ops on its own
 if is_ubuntu; then
@@ -261,8 +256,12 @@
     sudo yum-config-manager --enable ${OPTIONAL_REPO}
 fi
 
-# Filesystem setup
-# ----------------
+
+# Configure Target Directories
+# ----------------------------
+
+# Destination path for installation ``DEST``
+DEST=${DEST:-/opt/stack}
 
 # Create the destination directory and ensure it is writable by the user
 # and read/executable by everybody for daemons (e.g. apache run for horizon)
@@ -273,6 +272,12 @@
 # a basic test for $DEST path permissions (fatal on error unless skipped)
 check_path_perm_sanity ${DEST}
 
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+safe_chown -R $STACK_USER $DATA_DIR
+
+# Configure proper hostname
 # Certain services such as rabbitmq require that the local hostname resolves
 # correctly.  Make sure it exists in /etc/hosts so that is always true.
 LOCAL_HOSTNAME=`hostname -s`
@@ -280,11 +285,6 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
-# Destination path for service data
-DATA_DIR=${DATA_DIR:-${DEST}/data}
-sudo mkdir -p $DATA_DIR
-safe_chown -R $STACK_USER $DATA_DIR
-
 
 # Common Configuration
 # --------------------
@@ -336,6 +336,14 @@
 SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
 rm -f $SSL_BUNDLE_FILE
 
+# Import common services (database, message queue) configuration
+source $TOP_DIR/lib/database
+source $TOP_DIR/lib/rpc_backend
+
+# Make sure we only have one rpc backend enabled,
+# and the specified rpc backend is available on your platform.
+check_rpc_backend
+
 
 # Configure Projects
 # ==================
@@ -678,7 +686,7 @@
 fi
 
 # Do the ugly hacks for broken packages and distros
-$TOP_DIR/tools/fixup_stuff.sh
+source $TOP_DIR/tools/fixup_stuff.sh
 
 
 # Extras Pre-install
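The last ``stack.sh`` hunk switches ``fixup_stuff.sh`` from being executed to
being sourced, so its workarounds now run in stack.sh's own shell rather than
a child process (the matching guard appears in ``tools/fixup_stuff.sh`` below).
A minimal illustration of the difference; the throwaway script path is used
only for this example:

    # A child process cannot change the caller's variables; a sourced script can.
    echo 'FIXED_UP=yes' > /tmp/demo-fixup.sh
    bash /tmp/demo-fixup.sh
    echo "${FIXED_UP:-unset}"      # prints "unset": the child shell's change is lost
    source /tmp/demo-fixup.sh
    echo "${FIXED_UP:-unset}"      # prints "yes": the assignment ran in this shell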
diff --git a/stackrc b/stackrc
index 4f955bd..ad7da6c 100644
--- a/stackrc
+++ b/stackrc
@@ -319,6 +319,10 @@
 PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git}
 PECAN_BRANCH=${PECAN_BRANCH:-master}
 
+# sqlalchemy-migrate
+SQLALCHEMY_MIGRATE_REPO=${SQLALCHEMY_MIGRATE_REPO:-${GIT_BASE}/stackforge/sqlalchemy-migrate.git}
+SQLALCHEMY_MIGRATE_BRANCH=${SQLALCHEMY_MIGRATE_BRANCH:-master}
+
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
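The new ``stackrc`` entries follow the usual override pattern: the
``${VAR:-default}`` expansion keeps any value already set (for example in
``localrc`` or ``local.conf``) and only falls back to the in-tree default.  A
small sketch; the mirror URL is purely illustrative:

    # A value set before stackrc is sourced wins over the default.
    SQLALCHEMY_MIGRATE_REPO=https://git.example.org/mirror/sqlalchemy-migrate.git
    SQLALCHEMY_MIGRATE_REPO=${SQLALCHEMY_MIGRATE_REPO:-${GIT_BASE}/stackforge/sqlalchemy-migrate.git}
    echo $SQLALCHEMY_MIGRATE_REPO    # still prints the example.org mirror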
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 50fb31c..1732ecc 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -20,20 +20,24 @@
 #   - pre-install hgtools to work around a bug in RHEL6 distribute
 #   - install nose 1.1 from EPEL
 
-set -o errexit
-set -o xtrace
+# If TOP_DIR is set we're being sourced rather than running stand-alone
+# or in a sub-shell
+if [[ -z "$TOP_DIR" ]]; then
+    set -o errexit
+    set -o xtrace
 
-# Keep track of the current directory
-TOOLS_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
+    # Keep track of the current directory
+    TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+    TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
 
-# Change dir to top of devstack
-cd $TOP_DIR
+    # Change dir to top of devstack
+    cd $TOP_DIR
 
-# Import common functions
-source $TOP_DIR/functions
+    # Import common functions
+    source $TOP_DIR/functions
 
-FILES=$TOP_DIR/files
+    FILES=$TOP_DIR/files
+fi
 
 # Keystone Port Reservation
 # -------------------------
@@ -99,6 +103,21 @@
     if selinuxenabled; then
         sudo setenforce 0
     fi
+
+    FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
+    if [[ ${DISTRO} =~ (f19|f20) && $FORCE_FIREWALLD == "False" ]]; then
+        # On Fedora 19 and 20 firewalld interacts badly with libvirt and
+        # slows things down significantly.  However, for those cases
+        # where that combination is desired, allow this fix to be skipped.
+
+        # There was also an issue with firewalld hanging after
+        # installing libvirt with polkit.  See
+        # https://bugzilla.redhat.com/show_bug.cgi?id=1099031
+        if is_package_installed firewalld; then
+            uninstall_package firewalld
+        fi
+    fi
+
 fi
 
 # RHEL6
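The firewalld workaround above is gated on ``FORCE_FIREWALLD`` through
devstack's ``trueorfalse`` helper, which normalizes the usual boolean
spellings to ``True``/``False`` and supplies a default when the variable is
unset.  A minimal sketch, assuming ``functions`` (where ``trueorfalse`` is
made available) has been sourced:

    # "yes", "1", "true", etc. become "True"; unset falls back to the default.
    FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
    if [[ "$FORCE_FIREWALLD" == "False" ]]; then
        echo "firewalld will be removed on f19/f20"
    fi

Setting ``FORCE_FIREWALLD=True`` in ``localrc`` therefore keeps firewalld
installed for the cases where that combination is wanted.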
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..c8a603b
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,13 @@
+[tox]
+minversion = 1.6
+skipsdist = True
+envlist = bashate
+
+[testenv]
+usedevelop = False
+install_command = pip install {opts} {packages}
+
+[testenv:bashate]
+deps = bashate
+whitelist_externals = bash
+commands = bash -c "find {toxinidir} -not -wholename \*.tox/\* -and \( -name \*.sh -or -name \*rc -or -name functions\* -or \( -wholename lib/\* -and -not -name \*.md \) \) -print0 | xargs -0 bashate -v"
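With this ``tox.ini`` in place the bashate style check can be run locally
before pushing a change; this assumes ``tox`` (>= 1.6, per ``minversion``) is
available, e.g. from PyPI:

    pip install tox
    tox -e bashate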