Merge "Honor the flag for Identity v3 API only jobs"
diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst
index 20e8655..d3e8c67 100644
--- a/MAINTAINERS.rst
+++ b/MAINTAINERS.rst
@@ -75,12 +75,6 @@
 Tempest
 ~~~~~~~
 
-Trove
-~~~~~
-
-* Nikhil Manchanda <SlickNik@gmail.com>
-* Michael Basnight <mbasnight@gmail.com>
-
 Xen
 ~~~
 * Bob Ball <bob.ball@citrix.com>
diff --git a/README.md b/README.md
index 04f5fd9..455e1c6 100644
--- a/README.md
+++ b/README.md
@@ -149,6 +149,10 @@
 
     KEYSTONE_USE_MOD_WSGI="True"
 
+Example (Nova):
+
+    NOVA_USE_MOD_WSGI="True"
+
 Example (Swift):
 
     SWIFT_USE_MOD_WSGI="True"
@@ -328,7 +332,7 @@
 You likely want to change your `localrc` section to run a scheduler that
 will balance VMs across hosts:
 
-    SCHEDULER=nova.scheduler.simple.SimpleScheduler
+    SCHEDULER=nova.scheduler.filter_scheduler.FilterScheduler
 
 You can then run many compute nodes, each of which should have a `stackrc`
 which includes the following, with the IP address of the above controller node:
diff --git a/clean.sh b/clean.sh
index 7db519b..74bcaee 100755
--- a/clean.sh
+++ b/clean.sh
@@ -51,7 +51,6 @@
 source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ironic
-source $TOP_DIR/lib/trove
 
 
 # Extras Source
@@ -130,7 +129,7 @@
 fi
 
 # Clean up venvs
-DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]}"
+DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} $HOME/.config/openstack"
 rm -rf $DIRS_TO_CLEAN
 
 # Clean up files
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1cc7083..8e2e7ff 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -391,7 +391,7 @@
         ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api
 
 IP Version
-    | Default: ``IP_VERSION=4``
+    | Default: ``IP_VERSION=4+6``
     | This setting can be used to configure DevStack to create either an IPv4,
       IPv6, or dual stack tenant data network by setting ``IP_VERSION`` to
       either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 3030c7b..b0a8907 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -131,6 +131,11 @@
 subnet that exists in the private RFC1918 address space - however in
 a real setup FLOATING_RANGE would be a public IP address range.
 
+Note that the extension drivers for the ML2 plugin are set by
+`Q_ML2_PLUGIN_EXT_DRIVERS`, which includes 'port_security' by default. If you
+want to remove all extension drivers (even 'port_security'), set
+`Q_ML2_PLUGIN_EXT_DRIVERS` to blank.
+
 Neutron Networking with Open vSwitch and Provider Networks
 ==========================================================
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 4435b49..e0c3f3a 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -173,7 +173,6 @@
 * `lib/swift <lib/swift.html>`__
 * `lib/tempest <lib/tempest.html>`__
 * `lib/tls <lib/tls.html>`__
-* `lib/trove <lib/trove.html>`__
 * `lib/zaqar <lib/zaqar.html>`__
 * `unstack.sh <unstack.sh.html>`__
 * `clean.sh <clean.sh.html>`__
@@ -182,7 +181,6 @@
 * `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
 * `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
 * `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
-* `extras.d/70-trove.sh <extras.d/70-trove.sh.html>`__
 * `extras.d/70-tuskar.sh <extras.d/70-tuskar.sh.html>`__
 * `extras.d/70-zaqar.sh <extras.d/70-zaqar.sh.html>`__
 * `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
@@ -242,6 +240,5 @@
 * `exercises/sahara.sh <exercises/sahara.sh.html>`__
 * `exercises/sec\_groups.sh <exercises/sec_groups.sh.html>`__
 * `exercises/swift.sh <exercises/swift.sh.html>`__
-* `exercises/trove.sh <exercises/trove.sh.html>`__
 * `exercises/volumes.sh <exercises/volumes.sh.html>`__
 * `exercises/zaqar.sh <exercises/zaqar.sh.html>`__
diff --git a/extras.d/70-trove.sh b/extras.d/70-trove.sh
deleted file mode 100644
index f284354..0000000
--- a/extras.d/70-trove.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-# trove.sh - Devstack extras script to install Trove
-
-if is_service_enabled trove; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/trove
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Trove"
-        install_trove
-        install_troveclient
-        cleanup_trove
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Trove"
-        configure_trove
-
-        if is_service_enabled key; then
-            create_trove_accounts
-        fi
-
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        # Initialize trove
-        init_trove
-
-        # Start the trove API and trove taskmgr components
-        echo_summary "Starting Trove"
-        start_trove
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_trove
-    fi
-fi
diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template
new file mode 100644
index 0000000..70ccedd
--- /dev/null
+++ b/files/apache-nova-api.template
@@ -0,0 +1,16 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess nova-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup nova-api
+    WSGIScriptAlias / %PUBLICWSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/nova-api.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
\ No newline at end of file
diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template
new file mode 100644
index 0000000..ae4cf94
--- /dev/null
+++ b/files/apache-nova-ec2-api.template
@@ -0,0 +1,16 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess nova-ec2-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup nova-ec2-api
+    WSGIScriptAlias / %PUBLICWSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
\ No newline at end of file
diff --git a/files/debs/swift b/files/debs/swift
index 0089d27..726786e 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -1,8 +1,5 @@
 curl
 make
 memcached
-# NOTE python-nose only exists because of swift functional job, we should probably
-# figure out a more consistent way of installing this from test-requirements.txt instead
-python-nose
 sqlite3
 xfsprogs
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 2219426..42756d8 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -15,7 +15,6 @@
 openssl
 psmisc
 python-cmd2 # dist:opensuse-12.3
-python-pylint
 screen
 tar
 tcpdump
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index d1f378a..c45eae6 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -12,7 +12,5 @@
 python-dateutil
 python-eventlet
 python-mox
-python-nose
-python-pylint
 python-sqlalchemy-migrate
 python-xattr
diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift
index 4b14098..9c0d188 100644
--- a/files/rpms-suse/swift
+++ b/files/rpms-suse/swift
@@ -8,7 +8,6 @@
 python-eventlet
 python-greenlet
 python-netifaces
-python-nose
 python-simplejson
 python-xattr
 sqlite3
diff --git a/files/rpms/general b/files/rpms/general
index e17d6d6..7b2c00a 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -14,7 +14,6 @@
 libxslt-devel
 pkgconfig
 psmisc
-pylint
 python-devel
 screen
 tar
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 8d7f037..b2cf0de 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -1,6 +1,5 @@
 Django
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
-pylint
 pyxattr
 pcre-devel  # pyScss
diff --git a/functions-common b/functions-common
index f2e7076..ff92611 100644
--- a/functions-common
+++ b/functions-common
@@ -51,14 +51,16 @@
 function trueorfalse {
     local xtrace=$(set +o | grep xtrace)
     set +o xtrace
-    local default=$1
-    local literal=$2
-    local testval=${!literal:-}
 
-    [[ -z "$testval" ]] && { echo "$default"; return; }
-    [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
-    [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
-    echo "$default"
+    local default=$1
+    local testval=${!2:-}
+
+    case "$testval" in
+        "1" | [yY]es | "YES" | [tT]rue | "TRUE" ) echo "True" ;;
+        "0" | [nN]o | "NO" | [fF]alse | "FALSE" ) echo "False" ;;
+        * )                                       echo "$default" ;;
+    esac
+
     $xtrace
 }
 
@@ -1625,14 +1627,23 @@
 # Uses global ``ENABLED_SERVICES``
 # disable_negated_services
 function disable_negated_services {
-    local tmpsvcs="${ENABLED_SERVICES}"
+    local to_remove=""
+    local remaining=""
     local service
-    for service in ${tmpsvcs//,/ }; do
+
+    # build up list of services that should be removed; i.e. they
+    # begin with "-"
+    for service in ${ENABLED_SERVICES//,/ }; do
         if [[ ${service} == -* ]]; then
-            tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
+            to_remove+=",${service#-}"
+        else
+            remaining+=",${service}"
         fi
     done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+
+    # go through the service list.  if this service appears in the "to
+    # be removed" list, drop it
+    ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove")
 }
 
 # disable_service() removes the services passed as argument to the
@@ -1736,6 +1747,30 @@
     return $enabled
 }
 
+# remove specified list from the input string
+# remove_disabled_services service-list remove-list
+function remove_disabled_services {
+    local service_list=$1
+    local remove_list=$2
+    local service
+    local enabled=""
+
+    for service in ${service_list//,/ }; do
+        local remove
+        local add=1
+        for remove in ${remove_list//,/ }; do
+            if [[ ${remove} == ${service} ]]; then
+                add=0
+                break
+            fi
+        done
+        if [[ $add == 1 ]]; then
+            enabled="${enabled},$service"
+        fi
+    done
+    _cleanup_service_list "$enabled"
+}
+
 # Toggle enable/disable_service for services that must run exclusive of each other
 #  $1 The name of a variable containing a space-separated list of services
 #  $2 The name of a variable in which to store the enabled service's name
@@ -1873,6 +1908,12 @@
     echo $subnet
 }
 
+# Return the current python as "python<major>.<minor>"
+function python_version {
+    local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+    echo "python${python_version}"
+}
+
 # Service wrapper to restart services
 # restart_service service-name
 function restart_service {
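
The reworked `trueorfalse` takes a default and the *name* of a variable (not its
value), so callers pass the bare variable name. A minimal usage sketch, with
`MY_FLAG` as a hypothetical setting:

    MY_FLAG=${MY_FLAG:-}                    # may hold "yes", "1", "True", ...
    MY_FLAG=$(trueorfalse False MY_FLAG)    # normalized to "True" or "False"
    if [[ "$MY_FLAG" == "True" ]]; then
        echo "flag enabled"
    fi
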
diff --git a/inc/rootwrap b/inc/rootwrap
index bac8e1e..f91e557 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -38,11 +38,17 @@
 
 # Configure rootwrap
 # Make a load of assumptions otherwise we'll have 6 arguments
-# configure_rootwrap project bin conf-src-dir
+# configure_rootwrap project
 function configure_rootwrap {
-    local project=$1                    # xx
-    local rootwrap_bin=$2               # /opt/stack/xx.venv/bin/xx-rootwrap
-    local rootwrap_conf_src_dir=$3      # /opt/stack/xx/etc/xx
+    local project=$1
+    local project_uc=$(echo $1|tr a-z A-Z)
+    local bin_dir="${project_uc}_BIN_DIR"
+    bin_dir="${!bin_dir}"
+    local project_dir="${project_uc}_DIR"
+    project_dir="${!project_dir}"
+
+    local rootwrap_conf_src_dir="${project_dir}/etc/${project}"
+    local rootwrap_bin="${bin_dir}/${project}-rootwrap"
 
     # Start fresh with rootwrap filters
     sudo rm -rf /etc/${project}/rootwrap.d
@@ -53,12 +59,16 @@
     sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf
     sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf
 
-    # Specify rootwrap.conf as first parameter to rootwrap
-    rootwrap_sudo_cmd="$rootwrap_bin /etc/${project}/rootwrap.conf *"
-
     # Set up the rootwrap sudoers
     local tempfile=$(mktemp)
+    # Specify rootwrap.conf as first parameter to rootwrap
+    rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *"
     echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile
+    if [ -f ${bin_dir}/${project}-rootwrap-daemon ]; then
+        # The rootwrap daemon is always invoked with just the conf file, so no
+        # command wildcard is needed in the sudoers entry
+        rootwrap_sudo_cmd="${rootwrap_bin}-daemon /etc/${project}/rootwrap.conf"
+        echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >>$tempfile
+    fi
     chmod 0440 $tempfile
     sudo chown root:root $tempfile
     sudo mv $tempfile /etc/sudoers.d/${project}-rootwrap
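
The simplified `configure_rootwrap` derives its paths from per-project globals
via indirect expansion instead of taking them as arguments. A sketch of the
lookup it performs, using cinder as the example project (`CINDER_BIN_DIR` and
`CINDER_DIR` are set elsewhere by DevStack):

    project=cinder
    project_uc=$(echo $project | tr a-z A-Z)    # CINDER
    bin_dir_var="${project_uc}_BIN_DIR"
    dir_var="${project_uc}_DIR"
    echo "rootwrap binary: ${!bin_dir_var}/${project}-rootwrap"
    echo "rootwrap config: ${!dir_var}/etc/${project}/rootwrap.conf"
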
diff --git a/lib/ceilometer b/lib/ceilometer
index 9abdbfe..1f72187 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -254,7 +254,7 @@
 
     if is_service_enabled ceilometer-aipmi; then
         # Configure rootwrap for the ipmi agent
-        configure_rootwrap ceilometer $CEILOMETER_BIN_DIR/ceilometer-rootwrap $CEILOMETER_DIR/etc/ceilometer
+        configure_rootwrap ceilometer
     fi
 }
 
diff --git a/lib/ceph b/lib/ceph
index 76747cc..4068e26 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -279,7 +279,7 @@
     # configure Nova service options, ceph pool, ceph user and ceph key
     sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
     if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
     fi
 }
 
diff --git a/lib/cinder b/lib/cinder
index 6439903..da22e29 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -77,9 +77,20 @@
 
 
 # Should cinder perform secure deletion of volumes?
-# Defaults to true, can be set to False to avoid this bug when testing:
+# Defaults to zero. Can also be set to none or shred.
+# This was previously CINDER_SECURE_DELETE (True or False).
+# Equivalents using CINDER_VOLUME_CLEAR are zero and none, respectively.
+# Set to none to avoid this bug when testing:
 # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
-CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE)
+if [[ -n $CINDER_SECURE_DELETE ]]; then
+    CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE)
+    if [[ $CINDER_SECURE_DELETE == "False" ]]; then
+        CINDER_VOLUME_CLEAR_DEFAULT="none"
+    fi
+    DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n"
+fi
+CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
+CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
 
 # Cinder reports allocations back to the scheduler on periodic intervals
 # it turns out we can get an "out of space" issue when we run tests too
@@ -179,7 +190,7 @@
 
     rm -f $CINDER_CONF
 
-    configure_rootwrap cinder $CINDER_BIN_DIR/cinder-rootwrap $CINDER_DIR/etc/cinder
+    configure_rootwrap cinder
 
     cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI
 
@@ -256,9 +267,8 @@
 
     iniset_rpc_backend cinder $CINDER_CONF
 
-    if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then
-        iniset $CINDER_CONF DEFAULT secure_delete False
-        iniset $CINDER_CONF DEFAULT volume_clear none
+    if [[ "$CINDER_VOLUME_CLEAR" == "none" ]] || [[ "$CINDER_VOLUME_CLEAR" == "zero" ]] || [[ "$CINDER_VOLUME_CLEAR" == "shred" ]]; then
+        iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR
     fi
 
     # Format logging
@@ -289,6 +299,11 @@
         iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY"
     fi
 
+    # Set os_privileged_user credentials (used for os-assisted-snapshots)
+    iniset $CINDER_CONF DEFAULT os_privileged_user_name nova
+    iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD"
+    iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_TENANT_NAME"
+
 }
 
 # create_cinder_accounts() - Set up common required cinder accounts
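
A `local.conf` sketch of the volume-clear migration above; the deprecated
boolean maps onto the new option, so these two settings are roughly equivalent
(the first now only adds a deprecation warning):

    # old style (deprecated)
    CINDER_SECURE_DELETE=False

    # new style
    CINDER_VOLUME_CLEAR=none
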
diff --git a/lib/glance b/lib/glance
index f543e54..4e1bd24 100644
--- a/lib/glance
+++ b/lib/glance
@@ -138,26 +138,12 @@
     fi
 
     # Store specific configs
-    iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
-
-    # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
-    # sections.
     iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
 
     iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
 
     # Store the images in swift if enabled.
     if is_service_enabled s-proxy; then
-        iniset $GLANCE_API_CONF DEFAULT default_store swift
-        iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
-        iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance-swift
-        iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
-        iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
-
-        iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store"
-
-        # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
-        # sections.
         iniset $GLANCE_API_CONF glance_store default_store swift
         iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/
         iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift
@@ -211,9 +197,6 @@
     iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
 
     # Store specific confs
-    # NOTE(flaper87): Until Glance is fully migrated, set these configs in both
-    # sections.
-    iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
     iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/
 
     cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
diff --git a/lib/ironic b/lib/ironic
index 4a37f0a..7493c3c 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -58,6 +58,7 @@
 IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info}
 
 # Set up defaults for functional / integration testing
+IRONIC_NODE_UUID=${IRONIC_NODE_UUID:-`uuidgen`}
 IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts}
 IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates}
 IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS)
@@ -619,7 +620,12 @@
             node_options+=" -i $_IRONIC_DEPLOY_RAMDISK_KEY=$IRONIC_DEPLOY_RAMDISK_ID"
         fi
 
-        local node_id=$(ironic node-create --chassis_uuid $chassis_id \
+        # The first node created will be used for testing the ironic-without-glance
+        # scenario, so we need to know its UUID.
+        local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID")
+
+        local node_id=$(ironic node-create $standalone_node_uuid\
+            --chassis_uuid $chassis_id \
             --driver $IRONIC_DEPLOY_DRIVER \
             -p cpus=$ironic_node_cpu\
             -p memory_mb=$ironic_node_ram\
diff --git a/lib/keystone b/lib/keystone
index de2d2ca..7a949cf 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -50,7 +50,6 @@
 KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
-KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone}
 if is_suse; then
     KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/srv/www/htdocs/keystone}
 else
@@ -64,21 +63,21 @@
 # Toggle for deploying Keystone under HTTPD + mod_wsgi
 KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
 
-# Select the backend for Keystone's service catalog
+# Select the Catalog backend driver
 KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
 
-# Select the backend for Tokens
+# Select the token persistence backend driver
 KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql}
 
-# Select the backend for Identity
+# Select the Identity backend driver
 KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql}
 
-# Select the backend for Assignment
+# Select the Assignment backend driver
 KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql}
 
-# Select Keystone's token format
-# Choose from 'UUID', 'PKI', or 'PKIZ'
+# Select Keystone's token provider (and format)
+# Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
 KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
@@ -99,12 +98,6 @@
 # Set the tenant for service accounts in Keystone
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
-# valid identity backends as per dir keystone/identity/backends
-KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql
-
-# valid assignment backends as per dir keystone/identity/backends
-KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
-
 # if we are running with SSL use https protocols
 if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
     KEYSTONE_AUTH_PROTOCOL="https"
@@ -164,7 +157,7 @@
         keystone_auth_port=$KEYSTONE_AUTH_PORT_INT
     fi
     if [[ ${USE_VENV} = True ]]; then
-        venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/python2.7/site-packages"
+        venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages"
     fi
 
     # copy proxy vhost and wsgi file
@@ -231,15 +224,8 @@
         iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
     fi
 
-    # check if identity backend is valid
-    if [[ "$KEYSTONE_VALID_IDENTITY_BACKENDS" =~ "$KEYSTONE_IDENTITY_BACKEND" ]]; then
-        iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.$KEYSTONE_IDENTITY_BACKEND.Identity"
-    fi
-
-    # check if assignment backend is valid
-    if [[ "$KEYSTONE_VALID_ASSIGNMENT_BACKENDS" =~ "$KEYSTONE_ASSIGNMENT_BACKEND" ]]; then
-        iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment"
-    fi
+    iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND"
+    iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND"
 
     iniset_rpc_backend keystone $KEYSTONE_CONF
 
@@ -263,23 +249,16 @@
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
 
     if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then
-        iniset $KEYSTONE_CONF token provider keystone.token.providers.$KEYSTONE_TOKEN_FORMAT.Provider
+        iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT
     fi
 
     iniset $KEYSTONE_CONF database connection `database_connection_url keystone`
-    iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
 
-    if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
-        iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.sql.Token
-    elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
-        iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.memcache.Token
-    else
-        iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.kvs.Token
-    fi
+    iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND"
 
+    iniset $KEYSTONE_CONF catalog driver "$KEYSTONE_CATALOG_BACKEND"
     if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
         # Configure ``keystone.conf`` to use sql
-        iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
         inicomment $KEYSTONE_CONF catalog template_file
     else
         cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
@@ -306,7 +285,6 @@
         " -i $KEYSTONE_CATALOG
 
         # Configure ``keystone.conf`` to use templates
-        iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.Catalog"
         iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
     fi
 
@@ -502,10 +480,6 @@
         # Set up certificates
         rm -rf $KEYSTONE_CONF_DIR/ssl
         $KEYSTONE_BIN_DIR/keystone-manage pki_setup
-
-        # Create cache dir
-        sudo install -d -o $STACK_USER $KEYSTONE_AUTH_CACHE_DIR
-        rm -f $KEYSTONE_AUTH_CACHE_DIR/*
     fi
 }
 
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index abe6ea7..2733f1f 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -31,6 +31,9 @@
 Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000}
 # Default VLAN TypeDriver options
 Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-}
+# List of extension drivers to load; use '-' instead of ':-' so that this can
+# be explicitly overridden to blank
+Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
 
 # L3 Plugin to load for ML2
 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
@@ -104,13 +107,17 @@
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
 
-    # Since we enable the tunnel TypeDrivers, also enable a local_ip
-    iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
+    if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then
+        # Set local_ip if TENANT_TUNNELS are enabled.
+        iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP
+    fi
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
 
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 extension_drivers=$Q_ML2_PLUGIN_EXT_DRIVERS
+
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS
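
Because `Q_ML2_PLUGIN_EXT_DRIVERS` uses '-' expansion, an explicitly empty
value is honored; a `local.conf` sketch:

    # keep the default extension driver
    #Q_ML2_PLUGIN_EXT_DRIVERS=port_security

    # disable all ML2 extension drivers, including port_security
    Q_ML2_PLUGIN_EXT_DRIVERS=
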
diff --git a/lib/nova b/lib/nova
index 807dfce..da288d3 100644
--- a/lib/nova
+++ b/lib/nova
@@ -16,6 +16,7 @@
 #
 # - install_nova
 # - configure_nova
+# - _config_nova_apache_wsgi
 # - create_nova_conf
 # - init_nova
 # - start_nova
@@ -62,6 +63,15 @@
 # Expect to remove in L or M.
 NOVA_API_VERSION=${NOVA_API_VERSION-default}
 
+if is_suse; then
+    NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova}
+else
+    NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/var/www/nova}
+fi
+
+# Toggle for deploying Nova-API under HTTPD + mod_wsgi
+NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False}
+
 if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
     NOVA_SERVICE_PROTOCOL="https"
     EC2_SERVICE_PROTOCOL="https"
@@ -223,6 +233,64 @@
     #fi
 }
 
+# _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_nova_apache_wsgi {
+    sudo rm -f $NOVA_WSGI_DIR/*
+    sudo rm -f $(apache_site_config_for nova-api)
+    sudo rm -f $(apache_site_config_for nova-ec2-api)
+}
+
+# _config_nova_apache_wsgi() - Set WSGI config files of Nova
+function _config_nova_apache_wsgi {
+    sudo mkdir -p $NOVA_WSGI_DIR
+
+    local nova_apache_conf=$(apache_site_config_for nova-api)
+    local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+    local nova_ssl=""
+    local nova_certfile=""
+    local nova_keyfile=""
+    local nova_api_port=$NOVA_SERVICE_PORT
+    local nova_ec2_api_port=$EC2_SERVICE_PORT
+    local venv_path=""
+
+    if is_ssl_enabled_service nova-api; then
+        nova_ssl="SSLEngine On"
+        nova_certfile="SSLCertificateFile $NOVA_SSL_CERT"
+        nova_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY"
+    fi
+    if [[ ${USE_VENV} = True ]]; then
+        venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
+    fi
+
+    # copy proxy vhost and wsgi helper files
+    sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api
+    sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py $NOVA_WSGI_DIR/nova-ec2-api
+
+    sudo cp $FILES/apache-nova-api.template $nova_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$nova_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-api|g;
+        s|%SSLENGINE%|$nova_ssl|g;
+        s|%SSLCERTFILE%|$nova_certfile|g;
+        s|%SSLKEYFILE%|$nova_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $nova_apache_conf
+
+    sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$nova_ec2_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-ec2-api|g;
+        s|%SSLENGINE%|$nova_ssl|g;
+        s|%SSLCERTFILE%|$nova_certfile|g;
+        s|%SSLKEYFILE%|$nova_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $nova_ec2_apache_conf
+}
+
 # configure_nova() - Set config files, create data dirs, etc
 function configure_nova {
     # Put config files in ``/etc/nova`` for everyone to find
@@ -230,7 +298,7 @@
 
     install_default_policy nova
 
-    configure_rootwrap nova $NOVA_BIN_DIR/nova-rootwrap $NOVA_DIR/etc/nova
+    configure_rootwrap nova
 
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
         # Get the sample configuration file in place
@@ -392,7 +460,6 @@
     iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
     if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then
         iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
-        iniset $NOVA_CONF DEFAULT allow_migrate_to_same_host "True"
     fi
     iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
@@ -453,12 +520,16 @@
         iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
     fi
     # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$NOVA_USE_MOD_WSGI" == "False" ]  ; then
         setup_colorized_logging $NOVA_CONF DEFAULT
     else
         # Show user_name and project_name instead of user_id and project_id
         iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
     fi
+    if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+        _config_nova_apache_wsgi
+    fi
+
     if is_service_enabled ceilometer; then
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
         iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
@@ -655,6 +726,13 @@
     git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
     setup_develop $NOVA_DIR
     sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
+
+    if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+        install_apache_wsgi
+        if is_ssl_enabled_service "nova-api"; then
+            enable_mod_ssl
+        fi
+    fi
 }
 
 # start_nova_api() - Start the API process ahead of other things
@@ -671,7 +749,18 @@
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
-    run_process n-api "$NOVA_BIN_DIR/nova-api"
+    # If the apache site config has not been written yet we are in a grenade
+    # scenario, so fall back to running nova-api as a regular process
+    local enabled_site_file=$(apache_site_config_for nova-api)
+    if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+        enable_apache_site nova-api
+        enable_apache_site nova-ec2-api
+        restart_apache_server
+        tail_log nova /var/log/$APACHE_NAME/nova-api.log
+        tail_log nova /var/log/$APACHE_NAME/nova-ec2-api.log
+    else
+        run_process n-api "$NOVA_BIN_DIR/nova-api"
+    fi
+
     echo "Waiting for nova-api to start..."
     if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then
         die $LINENO "nova-api did not start"
@@ -780,6 +869,13 @@
 }
 
 function stop_nova_rest {
+    if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
+        disable_apache_site nova-api
+        disable_apache_site nova-ec2-api
+        restart_apache_server
+    else
+        stop_process n-api
+    fi
     # Kill the nova screen windows
     # Some services are listed here twice since more than one instance
     # of a service may be running in certain configs.
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 04da5e2..96d8a44 100755
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -39,17 +39,6 @@
         pip_install_gr libvirt-python
         install_package python-libguestfs
     fi
-
-    # Restart firewalld after install of libvirt to avoid a problem
-    # with polkit, which libvirtd brings in.  See
-    # https://bugzilla.redhat.com/show_bug.cgi?id=1099031
-
-    # Note there is a difference between F20 rackspace cloud images
-    # and HP images used in the gate; rackspace has firewalld but hp
-    # cloud doesn't.
-    if is_fedora && is_package_installed firewalld; then
-        sudo service firewalld restart || true
-    fi
 }
 
 # Configures the installed libvirt system so that is accessible by
diff --git a/lib/sahara b/lib/sahara
index 6d4e864..51e431a 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -186,7 +186,7 @@
 
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
-        iniset $SAHARA_CONF DEFAULT port $SAHARA_SERVICE_PORT_INT
+        iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT
     fi
 
     recreate_database sahara
diff --git a/lib/swift b/lib/swift
index 456dde4..820042d 100644
--- a/lib/swift
+++ b/lib/swift
@@ -439,7 +439,7 @@
     if is_service_enabled swift3; then
         cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
 [filter:s3token]
-paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory
+paste.filter_factory = keystonemiddleware.s3_token:filter_factory
 auth_port = ${KEYSTONE_AUTH_PORT}
 auth_host = ${KEYSTONE_AUTH_HOST}
 auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
diff --git a/lib/tempest b/lib/tempest
index 18e3703..059709d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -91,10 +91,7 @@
     local extensions_list=$1
     shift
     local disabled_exts=$*
-    for ext_to_remove in ${disabled_exts//,/ } ; do
-        extensions_list=${extensions_list/$ext_to_remove","}
-    done
-    echo $extensions_list
+    remove_disabled_services "$extensions_list" "$disabled_exts"
 }
 
 # configure_tempest() - Set config files, create data dirs, etc
@@ -106,6 +103,10 @@
         pip_install_gr testrepository
     fi
 
+    # Used during configuration so make sure we have the correct
+    # version installed
+    pip_install_gr python-openstackclient
+
     local image_lines
     local images
     local num_images
@@ -143,9 +144,7 @@
                 image_uuid_alt="$IMAGE_UUID"
             fi
             images+=($IMAGE_UUID)
-        # TODO(stevemar): update this command to use openstackclient's `openstack image list`
-        # when it supports listing by status.
-        done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+        done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
 
         case "${#images[*]}" in
             0)
@@ -348,7 +347,9 @@
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
-    iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
+    if ! is_service_enabled n-cell && ! is_service_enabled neutron; then
+        iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
+    fi
 
     # Compute Features
     # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
@@ -372,6 +373,8 @@
     iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions
     # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life.
     iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True
+    # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life.
+    iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
@@ -489,6 +492,8 @@
     if [ "$VIRT_DRIVER" = "ironic" ] ; then
         iniset $TEMPEST_CONFIG baremetal driver_enabled True
         iniset $TEMPEST_CONFIG baremetal unprovision_timeout 300
+        iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES
+        iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID
         iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
         iniset $TEMPEST_CONFIG compute-feature-enabled console_output False
         iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
diff --git a/lib/trove b/lib/trove
deleted file mode 100644
index b0a9610..0000000
--- a/lib/trove
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/bin/bash
-#
-# lib/trove
-# Functions to control the configuration and operation of the **Trove** service
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``STACK_USER`` must be defined
-# ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_trove
-# configure_trove
-# init_trove
-# start_trove
-# stop_trove
-# cleanup_trove
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-if is_service_enabled neutron; then
-    TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
-else
-    TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-fi
-
-# Set up default configuration
-GITDIR["python-troveclient"]=$DEST/python-troveclient
-
-TROVE_DIR=$DEST/trove
-TROVE_CONF_DIR=${TROVE_CONF_DIR:-/etc/trove}
-TROVE_CONF=${TROVE_CONF:-$TROVE_CONF_DIR/trove.conf}
-TROVE_TASKMANAGER_CONF=${TROVE_TASKMANAGER_CONF:-$TROVE_CONF_DIR/trove-taskmanager.conf}
-TROVE_CONDUCTOR_CONF=${TROVE_CONDUCTOR_CONF:-$TROVE_CONF_DIR/trove-conductor.conf}
-TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-$TROVE_CONF_DIR/trove-guestagent.conf}
-TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-$TROVE_CONF_DIR/api-paste.ini}
-
-TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
-TROVE_LOCAL_API_PASTE_INI=$TROVE_LOCAL_CONF_DIR/api-paste.ini
-TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
-TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"}
-TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"}
-TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"}
-
-# Support entry points installation of console scripts
-if [[ -d $TROVE_DIR/bin ]]; then
-    TROVE_BIN_DIR=$TROVE_DIR/bin
-else
-    TROVE_BIN_DIR=$(get_python_exec_prefix)
-fi
-TROVE_MANAGE=$TROVE_BIN_DIR/trove-manage
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,trove
-
-
-# Functions
-# ---------
-
-# Test if any Trove services are enabled
-# is_trove_enabled
-function is_trove_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0
-    return 1
-}
-
-# setup_trove_logging() - Adds logging configuration to conf files
-function setup_trove_logging {
-    local CONF=$1
-    iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $CONF DEFAULT use_syslog $SYSLOG
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        # Add color to logging output
-        setup_colorized_logging $CONF DEFAULT tenant user
-    fi
-}
-
-# create_trove_accounts() - Set up common required Trove accounts
-
-# Tenant               User       Roles
-# ------------------------------------------------------------------
-# service              trove     admin        # if enabled
-
-function create_trove_accounts {
-    if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
-
-        create_service_user "trove"
-
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-            local trove_service=$(get_or_create_service "trove" \
-                "database" "Trove Service")
-            get_or_create_endpoint $trove_service \
-                "$REGION_NAME" \
-                "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
-                "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
-                "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s"
-        fi
-    fi
-}
-
-# stack.sh entry points
-# ---------------------
-
-# cleanup_trove() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_trove {
-    #Clean up dirs
-    rm -fr $TROVE_AUTH_CACHE_DIR/*
-    rm -fr $TROVE_CONF_DIR/*
-}
-
-# configure_trove() - Set config files, create data dirs, etc
-function configure_trove {
-    setup_develop $TROVE_DIR
-
-    # Create the trove conf dir and cache dirs if they don't exist
-    sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} ${TROVE_AUTH_CACHE_DIR}
-
-    # Copy api-paste file over to the trove conf dir
-    cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI
-
-    # (Re)create trove conf files
-    rm -f $TROVE_CONF
-    rm -f $TROVE_TASKMANAGER_CONF
-    rm -f $TROVE_CONDUCTOR_CONF
-
-    iniset $TROVE_CONF DEFAULT rabbit_userid $RABBIT_USERID
-    iniset $TROVE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-    iniset $TROVE_CONF database connection `database_connection_url trove`
-    iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE
-    setup_trove_logging $TROVE_CONF
-    iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS"
-
-    configure_auth_token_middleware $TROVE_CONF trove $TROVE_AUTH_CACHE_DIR
-
-    # (Re)create trove taskmanager conf file if needed
-    if is_service_enabled tr-tmgr; then
-        TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION
-
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_userid $RABBIT_USERID
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-        iniset $TROVE_TASKMANAGER_CONF database connection `database_connection_url trove`
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user radmin
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name trove
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
-        iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
-        setup_trove_logging $TROVE_TASKMANAGER_CONF
-    fi
-
-    # (Re)create trove conductor conf file if needed
-    if is_service_enabled tr-cond; then
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_userid $RABBIT_USERID
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-        iniset $TROVE_CONDUCTOR_CONF database connection `database_connection_url trove`
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_user radmin
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_tenant_name trove
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
-        iniset $TROVE_CONDUCTOR_CONF DEFAULT control_exchange trove
-        setup_trove_logging $TROVE_CONDUCTOR_CONF
-    fi
-
-    # Set up Guest Agent conf
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_userid $RABBIT_USERID
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_host $TROVE_HOST_GATEWAY
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_user radmin
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_tenant_name trove
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/
-    iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log
-    setup_trove_logging $TROVE_GUESTAGENT_CONF
-}
-
-# install_troveclient() - Collect source and prepare
-function install_troveclient {
-    if use_library_from_git "python-troveclient"; then
-        git_clone_by_name "python-troveclient"
-        setup_dev_lib "python-troveclient"
-    fi
-}
-
-# install_trove() - Collect source and prepare
-function install_trove {
-    git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH
-}
-
-# init_trove() - Initializes Trove Database as a Service
-function init_trove {
-    # (Re)Create trove db
-    recreate_database trove
-
-    # Initialize the trove database
-    $TROVE_MANAGE db_sync
-
-    # If no guest image is specified, skip remaining setup
-    [ -z "$TROVE_GUEST_IMAGE_URL" ] && return 0
-
-    # Find the glance id for the trove guest image
-    # The image is uploaded by stack.sh -- see $IMAGE_URLS handling
-    GUEST_IMAGE_NAME=$(basename "$TROVE_GUEST_IMAGE_URL")
-    GUEST_IMAGE_NAME=${GUEST_IMAGE_NAME%.*}
-    TROVE_GUEST_IMAGE_ID=$(openstack --os-token $TOKEN --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image list | grep "${GUEST_IMAGE_NAME}" | get_field 1)
-    if [ -z "$TROVE_GUEST_IMAGE_ID" ]; then
-        # If no glance id is found, skip remaining setup
-        echo "Datastore ${TROVE_DATASTORE_TYPE} will not be created: guest image ${GUEST_IMAGE_NAME} not found."
-        return 1
-    fi
-
-    # Now that we have the guest image id, initialize appropriate datastores / datastore versions
-    $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" ""
-    $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" "$TROVE_DATASTORE_TYPE" \
-        "$TROVE_GUEST_IMAGE_ID" "$TROVE_DATASTORE_PACKAGE" 1
-    $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "inactive_manager" "$TROVE_GUEST_IMAGE_ID" "" 0
-    $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION"
-    $TROVE_MANAGE datastore_update "Inactive_Datastore" ""
-}
-
-# start_trove() - Start running processes, including screen
-function start_trove {
-    run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF --debug"
-    run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_TASKMANAGER_CONF --debug"
-    run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONDUCTOR_CONF --debug"
-}
-
-# stop_trove() - Stop running processes
-function stop_trove {
-    # Kill the trove screen windows
-    local serv
-    for serv in tr-api tr-tmgr tr-cond; do
-        stop_process $serv
-    done
-}
-
-# Restore xtrace
-$XTRACE
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/openrc b/openrc
index aec8a2a..64faa58 100644
--- a/openrc
+++ b/openrc
@@ -78,8 +78,14 @@
 #
 export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION}
 
-# Set the pointer to our CA certificate chain.  Harmless if TLS is not used.
-export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem}
+# Set OS_CACERT to a default CA certificate chain if it exists.
+if [[ ! -v OS_CACERT ]] ; then
+    DEFAULT_OS_CACERT=$INT_CA_DIR/ca-chain.pem
+    # If the file does not exist, this may confuse preflight sanity checks
+    if [ -e $DEFAULT_OS_CACERT ] ; then
+        export OS_CACERT=$DEFAULT_OS_CACERT
+    fi
+fi
 
 # Currently novaclient needs you to specify the *compute api* version.  This
 # needs to match the config of your catalog returned by Keystone.
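
If you want `openrc` to use your own CA bundle instead of the
DevStack-generated chain, export `OS_CACERT` before sourcing it; a sketch (the
path is hypothetical):

    export OS_CACERT=/path/to/my-ca-bundle.pem
    source openrc demo demo
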
diff --git a/stack.sh b/stack.sh
index 3925bb0..dea5643 100755
--- a/stack.sh
+++ b/stack.sh
@@ -705,24 +705,17 @@
 # Virtual Environment
 # -------------------
 
+# Install required infra support libraries
+install_infra
+
 # Pre-build some problematic wheels
 if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then
     source $TOP_DIR/tools/build_wheels.sh
-
-    # Due to https://bugs.launchpad.net/swift/+bug/1451992 we have to import
-    # this package with root once so the CFFI bindings can be built. We have
-    # to therefore install it so we can import it.
-    pip_install xattr
-    sudo python -c "import xattr"
 fi
 
 
 # Extras Pre-install
 # ------------------
-
-# Install required infra support libraries
-install_infra
-
 # Phase: pre-install
 run_phase stack pre-install
 
@@ -1220,13 +1213,6 @@
 elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
     NM_CONF=${NOVA_CONF}
     if is_service_enabled n-cell; then
-        # Both cells should have the same network uuid for server create
-        if [[ ! "$NETWORK_CREATE_ARGS" =~ "--uuid" ]]; then
-            NETWORK_CREATE_ARGS="$NETWORK_CREATE_ARGS --uuid $(uuidgen)"
-        fi
-        # Create a small network in the API cell
-        $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
-        # Everything else should go in the child cell
         NM_CONF=${NOVA_CELLS_CONF}
     fi
 
@@ -1308,6 +1294,29 @@
     echo $i=${!i} >>$TOP_DIR/.stackenv
 done
 
+# Write out a clouds.yaml file
+# The location is kept in a variable to allow for easier refactoring later to
+# make it overridable. There is currently no use case where doing so makes
+# sense, so it is not overridable yet.
+CLOUDS_YAML=~/.config/openstack/clouds.yaml
+if [ ! -e $CLOUDS_YAML ]; then
+    mkdir -p $(dirname $CLOUDS_YAML)
+    cat >"$CLOUDS_YAML" <<EOF
+clouds:
+  devstack:
+    auth:
+      auth_url: $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION
+      username: demo
+      project_name: demo
+      password: $ADMIN_PASSWORD
+    region_name: $REGION_NAME
+    identity_api_version: $IDENTITY_API_VERSION
+EOF
+    if [ -f "$SSL_BUNDLE_FILE" ]; then
+        echo "    cacert: $SSL_BUNDLE_FILE" >>"$CLOUDS_YAML"
+    fi
+fi
+
 
 # Wrapup configuration
 # ====================
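
Once `stack.sh` has written `~/.config/openstack/clouds.yaml`, clients that
read clouds.yaml can select the cloud by name; a sketch assuming a
python-openstackclient new enough to support `--os-cloud`:

    openstack --os-cloud devstack server list
    # or equivalently
    export OS_CLOUD=devstack
    openstack image list
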
diff --git a/stackrc b/stackrc
index af5ed6e..09ba3e9 100644
--- a/stackrc
+++ b/stackrc
@@ -244,11 +244,6 @@
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
 
-# trove service
-TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git}
-TROVE_BRANCH=${TROVE_BRANCH:-master}
-
-
 ##############
 #
 #  Testing Components
@@ -314,10 +309,6 @@
 GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
 GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master}
 
-# trove client library test
-GITREPO["python-troveclient"]=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git}
-GITBRANCH["python-troveclient"]=${TROVECLIENT_BRANCH:-master}
-
 # consolidated openstack python client
 GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
 GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master}
@@ -549,7 +540,7 @@
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
 #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
-CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.3.4"}
 CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -590,16 +581,15 @@
         IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};;
 esac
 
-# Trove needs a custom image for its work
-if [[ "$ENABLED_SERVICES" =~ 'tr-api' ]]; then
-    case "$VIRT_DRIVER" in
-        libvirt|ironic|xenapi)
-            TROVE_GUEST_IMAGE_URL=${TROVE_GUEST_IMAGE_URL:-"http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2"}
-            IMAGE_URLS+=",${TROVE_GUEST_IMAGE_URL}"
-            ;;
-        *)
-            ;;
-    esac
+# Staging area for new images. Keep them here for at least 24 hours so that
+# nodepool can cache them; otherwise the failure rates in the gate are too high.
+PRECACHE_IMAGES=$(trueorfalse False PRECACHE_IMAGES)
+if [[ "$PRECACHE_IMAGES" == "True" ]]; then
+
+    IMAGE_URL="http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2"
+    if ! [[ "$IMAGE_URLS"  =~ "$IMAGE_URL" ]]; then
+        IMAGE_URLS+=",$IMAGE_URL"
+    fi
 fi
 
 # 10Gb default volume backing file size
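
The Trove-specific image download is replaced by a generic, opt-in staging
list; a `local.conf` sketch to enable it (off by default):

    PRECACHE_IMAGES=True
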
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index f8e2c9e..f555de8 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -127,7 +127,40 @@
 test_disable_negated_services 'b,a,-a' 'b'
 test_disable_negated_services 'a,b,-a' 'b'
 test_disable_negated_services 'a,-a,b' 'b'
+test_disable_negated_services 'a,aa,-a' 'aa'
+test_disable_negated_services 'aa,-a' 'aa'
+test_disable_negated_services 'a_a, -a_a' ''
+test_disable_negated_services 'a-b, -a-b' ''
+test_disable_negated_services 'a-b, b, -a-b' 'b'
+test_disable_negated_services 'a,-a,av2,b' 'av2,b'
+test_disable_negated_services 'a,av2,-a,a' 'av2'
+test_disable_negated_services 'a,-a,av2' 'av2'
 
+echo "Testing remove_disabled_services()"
+
+function test_remove_disabled_services {
+    local service_list="$1"
+    local remove_list="$2"
+    local expected="$3"
+
+    results=$(remove_disabled_services "$service_list" "$remove_list")
+    if [ "$results" = "$expected" ]; then
+        passed "OK: '$service_list' - '$remove_list' -> '$results'"
+    else
+        failed "getting '$expected' from '$service_list' - '$remove_list' failed: '$results'"
+    fi
+}
+
+test_remove_disabled_services 'a,b,c' 'a,c' 'b'
+test_remove_disabled_services 'a,b,c' 'b' 'a,c'
+test_remove_disabled_services 'a,b,c,d' 'a,c d' 'b'
+test_remove_disabled_services 'a,b c,d' 'a d' 'b,c'
+test_remove_disabled_services 'a,b,c' 'a,b,c' ''
+test_remove_disabled_services 'a,b,c' 'd' 'a,b,c'
+test_remove_disabled_services 'a,b,c' '' 'a,b,c'
+test_remove_disabled_services '' 'a,b,c' ''
+test_remove_disabled_services '' '' ''
 
 echo "Testing is_package_installed()"
 
diff --git a/tests/test_ip.sh b/tests/test_ip.sh
index c53e80d..f8c2058 100755
--- a/tests/test_ip.sh
+++ b/tests/test_ip.sh
@@ -12,51 +12,41 @@
 
 echo "Testing IP addr functions"
 
-if [[ $(cidr2netmask 4) == 240.0.0.0 ]]; then
-    passed "cidr2netmask(): /4...OK"
-else
-    failed "cidr2netmask(): /4...failed"
-fi
-if [[ $(cidr2netmask 8) == 255.0.0.0 ]]; then
-    passed "cidr2netmask(): /8...OK"
-else
-    failed "cidr2netmask(): /8...failed"
-fi
-if [[ $(cidr2netmask 12) == 255.240.0.0 ]]; then
-    passed "cidr2netmask(): /12...OK"
-else
-    failed "cidr2netmask(): /12...failed"
-fi
-if [[ $(cidr2netmask 16) == 255.255.0.0 ]]; then
-    passed "cidr2netmask(): /16...OK"
-else
-    failed "cidr2netmask(): /16...failed"
-fi
-if [[ $(cidr2netmask 20) == 255.255.240.0 ]]; then
-    passed "cidr2netmask(): /20...OK"
-else
-    failed "cidr2netmask(): /20...failed"
-fi
-if [[ $(cidr2netmask 24) == 255.255.255.0 ]]; then
-    passed "cidr2netmask(): /24...OK"
-else
-    failed "cidr2netmask(): /24...failed"
-fi
-if [[ $(cidr2netmask 28) == 255.255.255.240 ]]; then
-    passed "cidr2netmask(): /28...OK"
-else
-    failed "cidr2netmask(): /28...failed"
-fi
-if [[ $(cidr2netmask 30) == 255.255.255.252 ]]; then
-    passed "cidr2netmask(): /30...OK"
-else
-    failed "cidr2netmask(): /30...failed"
-fi
-if [[ $(cidr2netmask 32) == 255.255.255.255 ]]; then
-    passed "cidr2netmask(): /32...OK"
-else
-    failed "cidr2netmask(): /32...failed"
-fi
+function test_cidr2netmask {
+    local mask=0
+    local ips="128 192 224 240 248 252 254 255"
+    local ip
+    local msg
+
+    msg="cidr2netmask(/0) == 0.0.0.0"
+    assert_equal "0.0.0.0" $(cidr2netmask $mask) "$msg"
+
+    for ip in $ips; do
+        mask=$(( mask + 1 ))
+        msg="cidr2netmask(/$mask) == $ip.0.0.0"
+        assert_equal "$ip.0.0.0" $(cidr2netmask $mask) "$msg"
+    done
+
+    for ip in $ips; do
+        mask=$(( mask + 1 ))
+        msg="cidr2netmask(/$mask) == 255.$ip.0.0"
+        assert_equal "255.$ip.0.0" $(cidr2netmask $mask) "$msg"
+    done
+
+    for ip in $ips; do
+        mask=$(( mask + 1 ))
+        msg="cidr2netmask(/$mask) == 255.255.$ip.0"
+        assert_equal "255.255.$ip.0" $(cidr2netmask $mask) "$msg"
+    done
+
+    for ip in $ips; do
+        mask=$(( mask + 1 ))
+        msg="cidr2netmask(/$mask) == 255.255.255.$ip"
+        assert_equal "255.255.255.$ip" $(cidr2netmask $mask) "$msg"
+    done
+}
+
+test_cidr2netmask
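
The octet values looped over above (128, 192, 224, 240, 248, 252, 254, 255) are simply 256 - 2^(8-N) for N leading one-bits in an octet. A purely arithmetic version, shown only as an illustration and not as DevStack's cidr2netmask, could be written as:

    # Hypothetical arithmetic equivalent of cidr2netmask: each octet gets
    # min(8, remaining prefix bits), and N one-bits give 256 - 2^(8-N).
    function prefix_to_netmask {
        local prefix=$1
        local octets=""
        local i bits
        for i in 0 1 2 3; do
            bits=$(( prefix - i * 8 ))
            if [[ $bits -lt 0 ]]; then
                bits=0
            elif [[ $bits -gt 8 ]]; then
                bits=8
            fi
            octets+="$(( 256 - 2 ** (8 - bits) ))."
        done
        echo "${octets%.}"
    }

    # prefix_to_netmask 20  ->  255.255.240.0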
 
 if [[ $(maskip 169.254.169.254 240.0.0.0) == 160.0.0.0 ]]; then
     passed "maskip(): /4...OK"
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 0bec584..8210d0a 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -29,7 +29,7 @@
     fi
 done
 
-ALL_LIBS="python-novaclient oslo.config pbr oslo.context python-troveclient"
+ALL_LIBS="python-novaclient oslo.config pbr oslo.context"
 ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf"
 ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib"
 ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh
index e57948a..2689589 100755
--- a/tests/test_truefalse.sh
+++ b/tests/test_truefalse.sh
@@ -8,27 +8,38 @@
 source $TOP/functions
 source $TOP/tests/unittest.sh
 
-function test_truefalse {
+function test_trueorfalse {
     local one=1
     local captrue=True
     local lowtrue=true
-    local abrevtrue=t
+    local uppertrue=TRUE
+    local capyes=Yes
+    local lowyes=yes
+    local upperyes=YES
+
+    for default in True False; do
+        for name in one captrue lowtrue uppertrue capyes lowyes upperyes; do
+            local msg="trueorfalse($default $name)"
+            assert_equal "True" $(trueorfalse $default $name) "$msg"
+        done
+    done
+
     local zero=0
     local capfalse=False
     local lowfalse=false
-    local abrevfalse=f
-    for against in True False; do
-        for name in one captrue lowtrue abrevtrue; do
-            assert_equal "True" $(trueorfalse $against $name) "\$(trueorfalse $against $name)"
-        done
-    done
-    for against in True False; do
-        for name in zero capfalse lowfalse abrevfalse; do
-            assert_equal "False" $(trueorfalse $against $name) "\$(trueorfalse $against $name)"
+    local upperfalse=FALSE
+    local capno=No
+    local lowno=no
+    local upperno=NO
+
+    for default in True False; do
+        for name in zero capfalse lowfalse upperfalse capno lowno upperno; do
+            local msg="trueorfalse($default $name)"
+            assert_equal "False" $(trueorfalse $default $name) "$msg"
         done
     done
 }
 
-test_truefalse
+test_trueorfalse
 
 report_results
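
The helper under test normalizes boolean-ish settings throughout DevStack. A typical call site, using a hypothetical ENABLE_FOO variable and the name-passing convention exercised by these tests, would look like:

    # Hypothetical example: 1/yes/true (any case) become "True"; unset,
    # 0, no and false become "False".
    ENABLE_FOO=$(trueorfalse False ENABLE_FOO)
    if [[ "$ENABLE_FOO" == "True" ]]; then
        echo "foo is enabled"
    fi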
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 69f19b7..93aa5fc 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -17,6 +17,8 @@
 PASS=0
 FAILED_FUNCS=""
 
+# pass a test, printing out MSG
+#  usage: passed message
 function passed {
     local lineno=$(caller 0 | awk '{print $1}')
     local function=$(caller 0 | awk '{print $2}')
@@ -25,9 +27,11 @@
         msg="OK"
     fi
     PASS=$((PASS+1))
-    echo $function:L$lineno $msg
+    echo "PASS: $function:L$lineno $msg"
 }
 
+# fail a test, printing out MSG
+#  usage: failed message
 function failed {
     local lineno=$(caller 0 | awk '{print $1}')
     local function=$(caller 0 | awk '{print $2}')
@@ -38,10 +42,16 @@
     ERROR=$((ERROR+1))
 }
 
+# assert that string val1 equals val2, printing out msg
+#  usage: assert_equal val1 val2 msg
 function assert_equal {
     local lineno=`caller 0 | awk '{print $1}'`
     local function=`caller 0 | awk '{print $2}'`
     local msg=$3
+
+    if [ -z "$msg" ]; then
+        msg="OK"
+    fi
     if [[ "$1" != "$2" ]]; then
         FAILED_FUNCS+="$function:L$lineno\n"
         echo "ERROR: $1 != $2 in $function:L$lineno!"
@@ -49,10 +59,13 @@
         ERROR=$((ERROR+1))
     else
         PASS=$((PASS+1))
-        echo "$function:L$lineno - ok"
+        echo "PASS: $function:L$lineno - $msg"
     fi
 }
 
+# print a summary of passing and failing tests, exiting
+# with an error if we have failed tests
+#  usage: report_results
 function report_results {
     echo "$PASS Tests PASSED"
     if [[ $ERROR -gt 1 ]]; then
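
Taken together, the documented helpers form a small harness for new test scripts. A minimal sketch of such a script, mirroring the pattern in tests/test_truefalse.sh above (the TOP definition here is an assumption, not copied from an existing file), would be:

    #!/usr/bin/env bash
    # Minimal sketch of a test script built on tests/unittest.sh
    TOP=$(cd $(dirname "$0")/.. && pwd)

    source $TOP/functions
    source $TOP/tests/unittest.sh

    # usage: assert_equal val1 val2 msg
    assert_equal "$(cidr2netmask 24)" "255.255.255.0" "cidr2netmask(/24)"

    # print the pass/fail summary and exit non-zero on failures
    report_results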
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
index c57568f..14c2999 100755
--- a/tools/build_wheels.sh
+++ b/tools/build_wheels.sh
@@ -60,6 +60,18 @@
 # Install modern pip and wheel
 PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
 
+# BUG: cffi has a lot of issues. It has no stable ABI; if installed
+# code was built against a different ABI than the one detected at
+# load time, it tries to recompile on the fly for the new ABI in the
+# install location (which will probably be /usr and not writable).
+# Also, cffi is often pulled in via setup_requires by packages, and
+# setup_requires follows different install rules (allowing betas)
+# than pip does.
+#
+# Because of this we must pip install cffi into the venv to build
+# wheels.
+PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install_gr cffi
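
A quick one-off sanity check (not part of the script) that cffi really landed in the temporary venv before any wheels are built could be run from a shell, assuming TMP_VENV_PATH as above:

    $TMP_VENV_PATH/bin/python -c "import cffi; print(cffi.__version__)"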
+
 # ``VENV_PACKAGES`` is a list of packages we want to pre-install
 VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
 if [[ -r $VENV_PACKAGE_FILE ]]; then
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 2efb4e0..31258d1 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -109,19 +109,28 @@
     fi
 
     FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
-    if [[ ${DISTRO} =~ (f20) && $FORCE_FIREWALLD == "False" ]]; then
+    if [[ $FORCE_FIREWALLD == "False" ]]; then
         # On Fedora 20 firewalld interacts badly with libvirt and
-        # slows things down significantly.  However, for those cases
-        # where that combination is desired, allow this fix to be skipped.
-
-        # There was also an additional issue with firewalld hanging
-        # after install of libvirt with polkit.  See
-        # https://bugzilla.redhat.com/show_bug.cgi?id=1099031
+        # slows things down significantly (this issue was fixed in
+        # later Fedoras).  There was also an additional issue with
+        # firewalld hanging after install of libvirt with polkit [1].
+        # firewalld also causes problems with neutron+ipv6 [2].
+        #
+        # Note we do the same as the RDO packages and stop & disable,
+        # rather than remove.  This is because other packages might
+        # have the dependency [3][4].
+        #
+        # [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031
+        # [2] https://bugs.launchpad.net/neutron/+bug/1455303
+        # [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp
+        # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html
         if is_package_installed firewalld; then
-            uninstall_package firewalld
+            sudo systemctl disable firewalld
+            sudo systemctl enable iptables
+            sudo systemctl stop firewalld
+            sudo systemctl start iptables
         fi
     fi
-
 fi
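
A hypothetical manual check (not part of fixup_stuff.sh) to confirm the swap on a Fedora host after the script has run:

    sudo systemctl is-active firewalld    # expect "inactive"
    sudo systemctl is-enabled firewalld   # expect "disabled"
    sudo systemctl is-enabled iptables    # expect "enabled"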
 
 # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 8dd455c..d846f10 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -18,6 +18,7 @@
 
 import argparse
 import datetime
+import fnmatch
 import os
 import os.path
 import sys
@@ -41,12 +42,24 @@
     print "WARN: %s" % msg
 
 
+def _dump_cmd(cmd):
+    print cmd
+    print "-" * len(cmd)
+    print
+    print os.popen(cmd).read()
+
+
+def _header(name):
+    print
+    print name
+    print "=" * len(name)
+    print
+
+
 def disk_space():
     # the df output
-    print """
-File System Summary
-===================
-"""
+    _header("File System Summary")
+
     dfraw = os.popen("df -Ph").read()
     df = [s.split() for s in dfraw.splitlines()]
     for fs in df:
@@ -61,13 +74,36 @@
     print dfraw
 
 
+def iptables_dump():
+    tables = ['filter', 'nat', 'mangle']
+    _header("IP Tables Dump")
+
+    for table in tables:
+        _dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table)
+
+
+def network_dump():
+    _header("Network Dump")
+
+    _dump_cmd("brctl show")
+    _dump_cmd("arp -n")
+    _dump_cmd("ip addr")
+    _dump_cmd("ip link")
+    _dump_cmd("ip route")
+
+
 def process_list():
-    print """
-Process Listing
-===============
-"""
-    psraw = os.popen("ps axo user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args").read()
-    print psraw
+    _header("Process Listing")
+    _dump_cmd("ps axo "
+              "user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args")
+
+
+def compute_consoles():
+    _header("Compute consoles")
+    for root, dirnames, filenames in os.walk('/opt/stack'):
+        for filename in fnmatch.filter(filenames, 'console.log'):
+            fullpath = os.path.join(root, filename)
+            _dump_cmd("sudo cat %s" % fullpath)
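
The new compute_consoles() walk is roughly equivalent to the following shell one-liner, shown only to illustrate what gets collected:

    sudo find /opt/stack -name console.log -exec cat {} \;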
 
 
 def main():
@@ -79,6 +115,9 @@
         os.dup2(f.fileno(), sys.stdout.fileno())
         disk_space()
         process_list()
+        network_dump()
+        iptables_dump()
+        compute_consoles()
 
 
 if __name__ == '__main__':
diff --git a/tools/xen/README.md b/tools/xen/README.md
index c8f47be..61694e9 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -97,7 +97,7 @@
     # Download a vhd and a uec image
     IMAGE_URLS="\
     https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\
-    http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-uec.tar.gz"
+    http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz"
 
     # Explicitly set virt driver
     VIRT_DRIVER=xenserver
diff --git a/unstack.sh b/unstack.sh
index ed7e617..f0da971 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -173,10 +173,6 @@
     cleanup_neutron
 fi
 
-if is_service_enabled trove; then
-    cleanup_trove
-fi
-
 if is_service_enabled dstat; then
     stop_dstat
 fi