Merge "Allow devstack plugins to specify prereq packages"
diff --git a/.gitignore b/.gitignore
index 67ab722..c6900c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,7 +14,7 @@
 files/*.qcow2
 files/images
 files/pip-*
-files/get-pip.py
+files/get-pip.py*
 local.conf
 local.sh
 localrc
diff --git a/README.md b/README.md
index c5e7f55..53de970 100644
--- a/README.md
+++ b/README.md
@@ -249,14 +249,17 @@
     Variable Name                    Notes
     ----------------------------------------------------------------------------
     Q_AGENT                          This specifies which agent to run with the
-                                     ML2 Plugin (either `openvswitch` or `linuxbridge`).
+                                     ML2 Plugin (Typically either `openvswitch`
+                                     or `linuxbridge`).
+                                     Defaults to `openvswitch`.
     Q_ML2_PLUGIN_MECHANISM_DRIVERS   The ML2 MechanismDrivers to load. The default
-                                     is none. Note, ML2 will work with the OVS
-                                     and LinuxBridge agents by default.
+                                     is `openvswitch,linuxbridge`.
     Q_ML2_PLUGIN_TYPE_DRIVERS        The ML2 TypeDrivers to load. Defaults to
                                      all available TypeDrivers.
-    Q_ML2_PLUGIN_GRE_TYPE_OPTIONS    GRE TypeDriver options. Defaults to none.
-    Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS  VXLAN TypeDriver options. Defaults to none.
+    Q_ML2_PLUGIN_GRE_TYPE_OPTIONS    GRE TypeDriver options. Defaults to
+                                     `tunnel_id_ranges=1:1000`.
+    Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS  VXLAN TypeDriver options. Defaults to
+                                     `vni_ranges=1001:2000`.
     Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS   VLAN TypeDriver options. Defaults to none.
 
 # Heat
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index fe3e2c2..7d06658 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -378,6 +378,18 @@
       can be configured with any valid IPv6 prefix. The default values make
       use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC 4193.*
 
+Unit tests dependencies install
+-------------------------------
+
+    | *Default: ``INSTALL_TESTONLY_PACKAGES=False``*
+    |  In order to be able to run unit tests with script ``run_tests.sh``,
+       the required package dependencies need to be installed.
+       Setting this option as shown below installs them.
+
+    ::
+
+        INSTALL_TESTONLY_PACKAGES=True
+
 Examples
 ========
 
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index 58ec3d3..610300b 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -129,7 +129,7 @@
     LIBVIRT_TYPE=kvm
 
 
-Once DevStack is configured succesfully, verify if the Nova instances
+Once DevStack is configured successfully, verify if the Nova instances
 are using KVM by noticing the QEMU CLI invoked by Nova is using the
 parameter `accel=kvm`, e.g.:
 
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 70287a9..236ece9 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -67,7 +67,7 @@
 
 ::
 
-    sudo apt-get install git -y || yum install -y git
+    sudo apt-get install git -y || sudo yum install -y git
     git clone https://git.openstack.org/openstack-dev/devstack
     cd devstack
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 10f4355..bac593d 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -156,9 +156,7 @@
 * `lib/ceilometer <lib/ceilometer.html>`__
 * `lib/ceph <lib/ceph.html>`__
 * `lib/cinder <lib/cinder.html>`__
-* `lib/config <lib/config.html>`__
 * `lib/database <lib/database.html>`__
-* `lib/dib <lib/dib.html>`__
 * `lib/dstat <lib/dstat.html>`__
 * `lib/glance <lib/glance.html>`__
 * `lib/heat <lib/heat.html>`__
@@ -181,7 +179,6 @@
 * `clean.sh <clean.sh.html>`__
 * `run\_tests.sh <run_tests.sh.html>`__
 
-* `extras.d/40-dib.sh <extras.d/40-dib.sh.html>`__
 * `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
 * `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
 * `extras.d/70-sahara.sh <extras.d/70-sahara.sh.html>`__
@@ -190,6 +187,12 @@
 * `extras.d/70-zaqar.sh <extras.d/70-zaqar.sh.html>`__
 * `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
 
+* `inc/ini-config <inc/ini-config.html>`__
+* `inc/meta-config <inc/meta-config.html>`__
+* `inc/python <inc/python.html>`__
+
+* `pkg/elasticsearch.sh <pkg/elasticsearch.sh.html>`__
+
 Configuration
 -------------
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index a9153df..5a61063 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -136,6 +136,31 @@
 
   enable_plugin ec2api git://git.openstack.org/stackforge/ec2api
 
+Plugins for gate jobs
+---------------------
+
+All OpenStack plugins that wish to be used as gate jobs need to exist
+in OpenStack's gerrit. Both ``openstack`` namespace and ``stackforge``
+namespace are fine. This allows testing of the plugin as well as
+provides network isolation against upstream git repository failures
+(which we see often enough to be an issue).
+
+Ideally plugins will be implemented as ``devstack`` directory inside
+the project they are testing. For example, the stackforge/ec2-api
+project has its plugin support in its tree.
+
+In the cases where there is no "project tree" per se (like
+integrating a backend storage configuration such as ceph or glusterfs)
+it's also allowed to build a dedicated
+``stackforge/devstack-plugin-FOO`` project to house the plugin.
+
+Note jobs must not require cloning of repositories during tests.
+Tests must list their repository in the ``PROJECTS`` variable for
+`devstack-gate
+<https://git.openstack.org/cgit/openstack-infra/devstack-gate/tree/devstack-vm-gate-wrap.sh>`_
+for the repository to be available to the test.  Further information
+is provided in the project creator's guide.
+
 Hypervisor
 ==========
 
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
deleted file mode 100755
index 4020580..0000000
--- a/exercises/horizon.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-
-# **horizon.sh**
-
-# Sanity check that horizon started if enabled
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-is_service_enabled horizon || exit 55
-
-# can we get the front page
-$CURL_GET http://$SERVICE_HOST 2>/dev/null | grep -q '<h3.*>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
-
diff --git a/extras.d/40-dib.sh b/extras.d/40-dib.sh
deleted file mode 100644
index fdae011..0000000
--- a/extras.d/40-dib.sh
+++ /dev/null
@@ -1,27 +0,0 @@
-# dib.sh - Devstack extras script to install diskimage-builder
-
-if is_service_enabled dib; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/dib
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing diskimage-builder"
-        install_dib
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        # no-op
-        :
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        # no-op
-        :
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        # no-op
-        :
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        # no-op
-        :
-    fi
-fi
diff --git a/files/apache-dib-pip-repo.template b/files/apache-dib-pip-repo.template
deleted file mode 100644
index 5d2379b..0000000
--- a/files/apache-dib-pip-repo.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %DIB_PIP_REPO_PORT%
-
-<VirtualHost *:%DIB_PIP_REPO_PORT%>
-    DocumentRoot %DIB_PIP_REPO%
-    <Directory %DIB_PIP_REPO%>
-        DirectoryIndex index.html
-        Require all granted
-        Order allow,deny
-        allow from all
-    </Directory>
-
-    ErrorLog /var/log/%APACHE_NAME%/dib_pip_repo_error.log
-    LogLevel warn
-    CustomLog /var/log/%APACHE_NAME%/dib_pip_repo_access.log combined
-</VirtualHost>
diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template
new file mode 100644
index 0000000..d88ac3e
--- /dev/null
+++ b/files/apache-heat-pip-repo.template
@@ -0,0 +1,15 @@
+Listen %HEAT_PIP_REPO_PORT%
+
+<VirtualHost *:%HEAT_PIP_REPO_PORT%>
+    DocumentRoot %HEAT_PIP_REPO%
+    <Directory %HEAT_PIP_REPO%>
+        DirectoryIndex index.html
+        Require all granted
+        Order allow,deny
+        allow from all
+    </Directory>
+
+    ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log
+    LogLevel warn
+    CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined
+</VirtualHost>
diff --git a/files/debs/trema b/files/debs/trema
deleted file mode 100644
index f685ca5..0000000
--- a/files/debs/trema
+++ /dev/null
@@ -1,15 +0,0 @@
-# Trema
-make
-ruby1.8
-rubygems1.8
-ruby1.8-dev
-libpcap-dev
-libsqlite3-dev
-libglib2.0-dev
-
-# Sliceable Switch
-sqlite3
-libdbi-perl
-libdbd-sqlite3-perl
-apache2
-libjson-perl
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 7f4bbfb..63cf14b 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -16,7 +16,6 @@
 psmisc
 python-cmd2 # dist:opensuse-12.3
 python-pylint
-python-unittest2
 screen
 tar
 tcpdump
diff --git a/files/rpms/general b/files/rpms/general
index 56a9331..eac4ec3 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -14,8 +14,6 @@
 libxslt-devel
 psmisc
 pylint
-python-unittest2
-python-virtualenv
 python-devel
 screen
 tar
diff --git a/files/rpms/swift b/files/rpms/swift
index 0fcdb0f..5789a19 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -13,3 +13,4 @@
 sqlite
 xfsprogs
 xinetd
+rsync-daemon # dist:f22,f23
diff --git a/files/rpms/zaqar-server b/files/rpms/zaqar-server
index 69e8bfa..541cefa 100644
--- a/files/rpms/zaqar-server
+++ b/files/rpms/zaqar-server
@@ -1,4 +1,5 @@
 selinux-policy-targeted
+mongodb
 mongodb-server
 pymongo
 redis # NOPRIME
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
index 3c50061..e473a2f 100644
--- a/files/venv-requirements.txt
+++ b/files/venv-requirements.txt
@@ -1,7 +1,8 @@
+cryptography
 lxml
 MySQL-python
 netifaces
-numpy
+#numpy    # slowest wheel by far, stop building until we are actually using the output
 posix-ipc
 psycopg2
 pycrypto
diff --git a/functions b/functions
index 79b2b37..9adbfe7 100644
--- a/functions
+++ b/functions
@@ -13,6 +13,7 @@
 # Include the common functions
 FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 source ${FUNC_DIR}/functions-common
+source ${FUNC_DIR}/inc/ini-config
 source ${FUNC_DIR}/inc/python
 
 # Save trace setting
diff --git a/functions-common b/functions-common
index e791ad7..3dae814 100644
--- a/functions-common
+++ b/functions-common
@@ -43,197 +43,6 @@
 
 TRACK_DEPENDS=${TRACK_DEPENDS:-False}
 
-# Config Functions
-# ================
-
-# Append a new option in an ini file without replacing the old value
-# iniadd config-file section option value1 value2 value3 ...
-function iniadd {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    shift 3
-
-    local values="$(iniget_multiline $file $section $option) $@"
-    iniset_multiline $file $section $option $values
-    $xtrace
-}
-
-# Comment an option in an INI file
-# inicomment config-file section option
-function inicomment {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
-    $xtrace
-}
-
-# Get an option from an INI file
-# iniget config-file section option
-function iniget {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    echo ${line#*=}
-    $xtrace
-}
-
-# Get a multiple line option from an INI file
-# iniget_multiline config-file section option
-function iniget_multiline {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    local values
-
-    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
-    echo ${values}
-    $xtrace
-}
-
-# Determinate is the given option present in the INI file
-# ini_has_option config-file section option
-function ini_has_option {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    local line
-
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
-    $xtrace
-    [ -n "$line" ]
-}
-
-# Add another config line for a multi-line option.
-# It's normally called after iniset of the same option and assumes
-# that the section already exists.
-#
-# Note that iniset_multiline requires all the 'lines' to be supplied
-# in the argument list. Doing that will cause incorrect configuration
-# if spaces are used in the config values.
-#
-# iniadd_literal config-file section option value
-function iniadd_literal {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    local value=$4
-
-    [[ -z $section || -z $option ]] && return
-
-    # Add it
-    sed -i -e "/^\[$section\]/ a\\
-$option = $value
-" "$file"
-
-    $xtrace
-}
-
-function inidelete {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-
-    [[ -z $section || -z $option ]] && return
-
-    # Remove old values
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
-
-    $xtrace
-}
-
-# Set an option in an INI file
-# iniset config-file section option value
-function iniset {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    local value=$4
-
-    [[ -z $section || -z $option ]] && return
-
-    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    fi
-    if ! ini_has_option "$file" "$section" "$option"; then
-        # Add it
-        sed -i -e "/^\[$section\]/ a\\
-$option = $value
-" "$file"
-    else
-        local sep=$(echo -ne "\x01")
-        # Replace it
-        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
-    fi
-    $xtrace
-}
-
-# Set a multiple line option in an INI file
-# iniset_multiline config-file section option value1 value2 valu3 ...
-function iniset_multiline {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-
-    shift 3
-    local values
-    for v in $@; do
-        # The later sed command inserts each new value in the line next to
-        # the section identifier, which causes the values to be inserted in
-        # the reverse order. Do a reverse here to keep the original order.
-        values="$v ${values}"
-    done
-    if ! grep -q "^\[$section\]" "$file"; then
-        # Add section at the end
-        echo -e "\n[$section]" >>"$file"
-    else
-        # Remove old values
-        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
-    fi
-    # Add new ones
-    for v in $values; do
-        sed -i -e "/^\[$section\]/ a\\
-$option = $v
-" "$file"
-    done
-    $xtrace
-}
-
-# Uncomment an option in an INI file
-# iniuncomment config-file section option
-function iniuncomment {
-    local xtrace=$(set +o | grep xtrace)
-    set +o xtrace
-    local file=$1
-    local section=$2
-    local option=$3
-    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
-    $xtrace
-}
 
 # Normalize config values to True or False
 # Accepts as False: 0 no No NO false False FALSE
@@ -253,14 +62,6 @@
     $xtrace
 }
 
-function isset {
-    nounset=$(set +o | grep nounset)
-    set +o nounset
-    [[ -n "${!1+x}" ]]
-    result=$?
-    $nounset
-    return $result
-}
 
 # Control Functions
 # =================
@@ -741,11 +542,11 @@
     local host_ip_iface=$3
     local host_ip=$4
 
-    # Find the interface used for the default route
-    host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
     # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
     if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
         host_ip=""
+        # Find the interface used for the default route
+        host_ip_iface=${host_ip_iface:-$(ip route | awk '/default/ {print $5}' | head -1)}
         local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}')
         local ip
         for ip in $host_ips; do
diff --git a/inc/ini-config b/inc/ini-config
new file mode 100644
index 0000000..0d6d169
--- /dev/null
+++ b/inc/ini-config
@@ -0,0 +1,223 @@
+#!/bin/bash
+#
+# **inc/ini-config** - Configuration/INI functions
+#
+# Support for manipulating INI-style configuration files
+#
+# These functions have no external dependencies and no side-effects
+
+# Save trace setting
+INC_CONF_TRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Config Functions
+# ================
+
+# Append a new option in an ini file without replacing the old value
+# iniadd config-file section option value1 value2 value3 ...
+function iniadd {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+
+    local values="$(iniget_multiline $file $section $option) $@"
+    iniset_multiline $file $section $option $values
+    $xtrace
+}
+
+# Comment an option in an INI file
+# inicomment config-file section option
+function inicomment {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
+    $xtrace
+}
+
+# Get an option from an INI file
+# iniget config-file section option
+function iniget {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    echo ${line#*=}
+    $xtrace
+}
+
+# Get a multiple line option from an INI file
+# iniget_multiline config-file section option
+function iniget_multiline {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local values
+
+    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
+    echo ${values}
+    $xtrace
+}
+
+# Determine if the given option is present in the INI file
+# ini_has_option config-file section option
+function ini_has_option {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local line
+
+    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    $xtrace
+    [ -n "$line" ]
+}
+
+# Add another config line for a multi-line option.
+# It's normally called after iniset of the same option and assumes
+# that the section already exists.
+#
+# Note that iniset_multiline requires all the 'lines' to be supplied
+# in the argument list. Doing that will cause incorrect configuration
+# if spaces are used in the config values.
+#
+# iniadd_literal config-file section option value
+function iniadd_literal {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local value=$4
+
+    [[ -z $section || -z $option ]] && return
+
+    # Add it
+    sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" "$file"
+
+    $xtrace
+}
+
+# Remove an option from an INI file
+# inidelete config-file section option
+function inidelete {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+
+    [[ -z $section || -z $option ]] && return
+
+    # Remove old values
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+
+    $xtrace
+}
+
+# Set an option in an INI file
+# iniset config-file section option value
+function iniset {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    local value=$4
+
+    [[ -z $section || -z $option ]] && return
+
+    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    fi
+    if ! ini_has_option "$file" "$section" "$option"; then
+        # Add it
+        sed -i -e "/^\[$section\]/ a\\
+$option = $value
+" "$file"
+    else
+        local sep=$(echo -ne "\x01")
+        # Replace it
+        sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
+    fi
+    $xtrace
+}
+
+# Set a multiple line option in an INI file
+# iniset_multiline config-file section option value1 value2 value3 ...
+function iniset_multiline {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+
+    shift 3
+    local values
+    for v in $@; do
+        # The later sed command inserts each new value in the line next to
+        # the section identifier, which causes the values to be inserted in
+        # the reverse order. Do a reverse here to keep the original order.
+        values="$v ${values}"
+    done
+    if ! grep -q "^\[$section\]" "$file"; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    else
+        # Remove old values
+        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+    fi
+    # Add new ones
+    for v in $values; do
+        sed -i -e "/^\[$section\]/ a\\
+$option = $v
+" "$file"
+    done
+    $xtrace
+}
+
+# Uncomment an option in an INI file
+# iniuncomment config-file section option
+function iniuncomment {
+    local xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file"
+    $xtrace
+}
+
+function isset {
+    nounset=$(set +o | grep nounset)
+    set +o nounset
+    [[ -n "${!1+x}" ]]
+    result=$?
+    $nounset
+    return $result
+}
+
+
+# Restore xtrace
+$INC_CONF_TRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/config b/inc/meta-config
similarity index 96%
rename from lib/config
rename to inc/meta-config
index 31c6fa6..c8789bf 100644
--- a/lib/config
+++ b/inc/meta-config
@@ -1,7 +1,9 @@
 #!/bin/bash
 #
-# lib/config - Configuration file manipulation functions
-
+# **inc/meta-config** - Configuration file manipulation functions
+#
+# Support for DevStack's local.conf meta-config sections
+#
 # These functions have no external dependencies and the following side-effects:
 #
 # CONFIG_AWK_CMD is defined, default is ``awk``
@@ -18,7 +20,7 @@
 # file-name is the destination of the config file
 
 # Save trace setting
-C_XTRACE=$(set +o | grep xtrace)
+INC_META_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -176,7 +178,7 @@
 
 
 # Restore xtrace
-$C_XTRACE
+$INC_META_XTRACE
 
 # Local variables:
 # mode: shell-script
diff --git a/inc/python b/inc/python
index dfc4d63..229c540 100644
--- a/inc/python
+++ b/inc/python
@@ -94,9 +94,10 @@
 
     $xtrace
     $sudo_pip \
-        http_proxy=${http_proxy:-} \
-        https_proxy=${https_proxy:-} \
-        no_proxy=${no_proxy:-} \
+        http_proxy="${http_proxy:-}" \
+        https_proxy="${https_proxy:-}" \
+        no_proxy="${no_proxy:-}" \
+        PIP_FIND_LINKS=$PIP_FIND_LINKS \
         $cmd_pip install \
         $@
 
@@ -108,6 +109,7 @@
                 http_proxy=${http_proxy:-} \
                 https_proxy=${https_proxy:-} \
                 no_proxy=${no_proxy:-} \
+                PIP_FIND_LINKS=$PIP_FIND_LINKS \
                 $cmd_pip install \
                 -r $test_req
         fi
diff --git a/lib/ceilometer b/lib/ceilometer
index 9db0640..a464c52 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -322,6 +322,8 @@
     if use_library_from_git "ceilometermiddleware"; then
         git_clone_by_name "ceilometermiddleware"
         setup_dev_lib "ceilometermiddleware"
+    else
+        pip_install ceilometermiddleware
     fi
 }
 
diff --git a/lib/ceph b/lib/ceph
index a6b8cc8..76747cc 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -71,7 +71,7 @@
 CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
 
 # Connect to an existing Ceph cluster
-REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH)
+REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
 REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
 
 
@@ -151,14 +151,14 @@
     if [[ -e ${CEPH_DISK_IMAGE} ]]; then
         sudo rm -f ${CEPH_DISK_IMAGE}
     fi
+
+    # purge ceph config file and keys
+    sudo rm -rf ${CEPH_CONF_DIR}/*
 }
 
 function cleanup_ceph_general {
     undefine_virsh_secret
     uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1
-
-    # purge ceph config file and keys
-    sudo rm -rf ${CEPH_CONF_DIR}/*
 }
 
 
diff --git a/lib/cinder b/lib/cinder
index 0d157dd..958c7f0 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -228,7 +228,6 @@
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $CINDER_CONF DEFAULT verbose True
 
-    iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST"
     iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
     iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder`
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
@@ -242,6 +241,8 @@
     # supported.
     iniset $CINDER_CONF DEFAULT enable_v1_api true
 
+    iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME"
+
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local enabled_backends=""
         local default_name=""
@@ -371,15 +372,9 @@
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local be be_name be_type
-        local has_lvm=0
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
             be_type=${be%%:*}
             be_name=${be##*:}
-
-            if [[ $be_type == 'lvm' ]]; then
-                has_lvm=1
-            fi
-
             if type init_cinder_backend_${be_type} >/dev/null 2>&1; then
                 # Always init the default volume group for lvm.
                 if [[ "$be_type" == "lvm" ]]; then
@@ -390,17 +385,6 @@
         done
     fi
 
-    # Keep it simple, set a marker if there's an LVM backend
-    # use the created VG's to setup lvm filters
-    if [[ $has_lvm == 1 ]]; then
-        # Order matters here, not only obviously to make
-        # sure the VG's are created, but also some distros
-        # do some customizations to lvm.conf on init, we
-        # want to make sure we copy those over
-        sudo cp /etc/lvm/lvm.conf /etc/cinder/lvm.conf
-        configure_cinder_backend_conf_lvm
-    fi
-
     mkdir -p $CINDER_STATE_PATH/volumes
     create_cinder_cache_dir
 }
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 52fc6fb..f210578 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -19,7 +19,6 @@
 # clean_cinder_backend_lvm - called from clean_cinder()
 # configure_cinder_backend_lvm - called from configure_cinder()
 # init_cinder_backend_lvm - called from init_cinder()
-# configure_cinder_backend_conf_lvm - called from configure_cinder()
 
 
 # Save trace setting
@@ -66,36 +65,6 @@
     init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE
 }
 
-# configure_cinder_backend_conf_lvm - Sets device filter in /etc/cinder/lvm.conf
-# init_cinder_backend_lvm
-function configure_cinder_backend_conf_lvm {
-    local filter_suffix='"r/.*/" ]'
-    local filter_string="filter = [ "
-    local conf_entries=$(grep volume_group /etc/cinder/cinder.conf | sed "s/ //g")
-    local pv
-    local vg
-    local line
-
-    for pv_info in $(sudo pvs --noheadings -o name,vg_name --separator ';'); do
-        echo_summary "Evaluate PV info for Cinder lvm.conf: $pv_info"
-        IFS=';' read pv vg <<< "$pv_info"
-        for line in ${conf_entries}; do
-            IFS='=' read label group <<< "$line"
-            group=$(echo $group|sed "s/^ *//g")
-            if [[ "$vg" == "$group" ]]; then
-                new="\"a$pv/\", "
-                filter_string=$filter_string$new
-            fi
-        done
-    done
-    filter_string=$filter_string$filter_suffix
-
-    # FIXME(jdg): Possible odd case that the lvm.conf file has been modified
-    # and doesn't have a filter entry to search/replace.  For devstack don't
-    # know that we care, but could consider adding a check and add
-    sudo sed -i "s#^[ \t]*filter.*#    $filter_string#g" /etc/cinder/lvm.conf
-    echo "set LVM filter_strings: $filter_string"
-}
 # Restore xtrace
 $MY_XTRACE
 
diff --git a/lib/dib b/lib/dib
deleted file mode 100644
index 88d9fd8..0000000
--- a/lib/dib
+++ /dev/null
@@ -1,149 +0,0 @@
-#!/bin/bash
-#
-# lib/dib
-# Install and build images with **diskimage-builder**
-
-# Dependencies:
-#
-# - functions
-# - DEST, DATA_DIR must be defined
-
-# stack.sh
-# ---------
-# - install_dib
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-# Defaults
-# --------
-
-# set up default directories
-DIB_DIR=$DEST/diskimage-builder
-TIE_DIR=$DEST/tripleo-image-elements
-
-# NOTE: Setting DIB_APT_SOURCES assumes you will be building
-# Debian/Ubuntu based images. Leave unset for other flavors.
-DIB_APT_SOURCES=${DIB_APT_SOURCES:-""}
-DIB_BUILD_OFFLINE=$(trueorfalse False DIB_BUILD_OFFLINE)
-DIB_IMAGE_CACHE=$DATA_DIR/diskimage-builder/image-create
-DIB_PIP_REPO=$DATA_DIR/diskimage-builder/pip-repo
-DIB_PIP_REPO_PORT=${DIB_PIP_REPO_PORT:-8899}
-
-OCC_DIR=$DEST/os-collect-config
-ORC_DIR=$DEST/os-refresh-config
-OAC_DIR=$DEST/os-apply-config
-
-# Functions
-# ---------
-
-# install_dib() - Collect source and prepare
-function install_dib {
-    pip_install diskimage-builder
-
-    git_clone $TIE_REPO $TIE_DIR $TIE_BRANCH
-    git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
-    git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
-    git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
-    mkdir -p $DIB_IMAGE_CACHE
-}
-
-# build_dib_pip_repo() - Builds a local pip repo from local projects
-function build_dib_pip_repo {
-    local project_dirs=$1
-    local projpath proj package
-
-    rm -rf $DIB_PIP_REPO
-    mkdir -p $DIB_PIP_REPO
-
-    echo "<html><body>" > $DIB_PIP_REPO/index.html
-    for projpath in $project_dirs; do
-        proj=$(basename $projpath)
-        mkdir -p $DIB_PIP_REPO/$proj
-        pushd $projpath
-        rm -rf dist
-        python setup.py sdist
-        pushd dist
-        package=$(ls *)
-        mv $package $DIB_PIP_REPO/$proj/$package
-        popd
-
-        echo "<html><body><a href=\"$package\">$package</a></body></html>" > $DIB_PIP_REPO/$proj/index.html
-        echo "<a href=\"$proj\">$proj</a><br/>" >> $DIB_PIP_REPO/index.html
-
-        popd
-    done
-
-    echo "</body></html>" >> $DIB_PIP_REPO/index.html
-
-    local dib_pip_repo_apache_conf=$(apache_site_config_for dib_pip_repo)
-
-    sudo cp $FILES/apache-dib-pip-repo.template $dib_pip_repo_apache_conf
-    sudo sed -e "
-        s|%DIB_PIP_REPO%|$DIB_PIP_REPO|g;
-        s|%DIB_PIP_REPO_PORT%|$DIB_PIP_REPO_PORT|g;
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-    " -i $dib_pip_repo_apache_conf
-    enable_apache_site dib_pip_repo
-}
-
-# disk_image_create_upload() - Creates and uploads a diskimage-builder built image
-function disk_image_create_upload {
-
-    local image_name=$1
-    local image_elements=$2
-    local elements_path=$3
-
-    local image_path=$TOP_DIR/files/$image_name.qcow2
-
-    # Include the apt-sources element in builds if we have an
-    # alternative sources.list specified.
-    if [ -n "$DIB_APT_SOURCES" ]; then
-        if [ ! -e "$DIB_APT_SOURCES" ]; then
-            die $LINENO "DIB_APT_SOURCES set but not found at $DIB_APT_SOURCES"
-        fi
-        local extra_elements="apt-sources"
-    fi
-
-    # Set the local pip repo as the primary index mirror so the
-    # image is built with local packages
-    local pypi_mirror_url=http://$SERVICE_HOST:$DIB_PIP_REPO_PORT/
-    local pypi_mirror_url_1
-
-    if [ -a $HOME/.pip/pip.conf ]; then
-        # Add the current pip.conf index-url as an extra-index-url
-        # in the image build
-        pypi_mirror_url_1=$(iniget $HOME/.pip/pip.conf global index-url)
-    else
-        # If no pip.conf, set upstream pypi as an extra mirror
-        # (this also sets the .pydistutils.cfg index-url)
-        pypi_mirror_url_1=http://pypi.python.org/simple
-    fi
-
-    # The disk-image-create command to run
-    ELEMENTS_PATH=$elements_path \
-    DIB_APT_SOURCES=$DIB_APT_SOURCES \
-    DIB_OFFLINE=$DIB_BUILD_OFFLINE \
-    PYPI_MIRROR_URL=$pypi_mirror_url \
-    PYPI_MIRROR_URL_1=$pypi_mirror_url_1 \
-    disk-image-create -a amd64 $image_elements ${extra_elements:-} \
-        --image-cache $DIB_IMAGE_CACHE \
-        -o $image_path
-
-    local token=$(keystone token-get | grep ' id ' | get_field 2)
-    die_if_not_set $LINENO token "Keystone fail to get token"
-
-    glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \
-        image-create --name $image_name --is-public True \
-        --container-format=bare --disk-format qcow2 \
-        < $image_path
-}
-
-# Restore xtrace
-$XTRACE
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/glance b/lib/glance
index eb1df2e..26d7960 100755
--- a/lib/glance
+++ b/lib/glance
@@ -185,8 +185,8 @@
 
     # Format logging
     if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $GLANCE_API_CONF DEFAULT "project_id" "user_id"
-        setup_colorized_logging $GLANCE_REGISTRY_CONF DEFAULT "project_id" "user_id"
+        setup_colorized_logging $GLANCE_API_CONF DEFAULT tenant user
+        setup_colorized_logging $GLANCE_REGISTRY_CONF DEFAULT tenant user
     fi
 
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
diff --git a/lib/heat b/lib/heat
index c102163..cef7069 100644
--- a/lib/heat
+++ b/lib/heat
@@ -8,9 +8,7 @@
 #   ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
 
 # Dependencies:
-#
-# - functions
-# - dib (if HEAT_CREATE_TEST_IMAGE=True)
+# (none)
 
 # stack.sh
 # ---------
@@ -37,6 +35,13 @@
 HEAT_DIR=$DEST/heat
 HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
 HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
+OCC_DIR=$DEST/os-collect-config
+ORC_DIR=$DEST/os-refresh-config
+OAC_DIR=$DEST/os-apply-config
+
+HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo
+HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899}
+
 HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
 HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE)
 HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON)
@@ -44,17 +49,19 @@
 HEAT_CONF=$HEAT_CONF_DIR/heat.conf
 HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
 HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
-HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
 HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
 HEAT_API_PORT=${HEAT_API_PORT:-8004}
-HEAT_FUNCTIONAL_IMAGE_ELEMENTS=${HEAT_FUNCTIONAL_IMAGE_ELEMENTS:-\
-vm fedora selinux-permissive pypi  os-collect-config os-refresh-config \
-os-apply-config heat-cfntools heat-config heat-config-cfn-init \
-heat-config-puppet heat-config-script}
 
 
 # other default options
-HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
+if [[ "$HEAT_STANDALONE" = "True" ]]; then
+    # for standalone, use defaults which require no service user
+    HEAT_STACK_DOMAIN=`trueorfalse False $HEAT_STACK_DOMAIN`
+    HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password}
+else
+    HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
+    HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
+fi
 
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,heat
@@ -76,13 +83,11 @@
     sudo rm -rf $HEAT_AUTH_CACHE_DIR
     sudo rm -rf $HEAT_ENV_DIR
     sudo rm -rf $HEAT_TEMPLATES_DIR
+    sudo rm -rf $HEAT_CONF_DIR
 }
 
 # configure_heat() - Set config files, create data dirs, etc
 function configure_heat {
-    if [[ "$HEAT_STANDALONE" = "True" ]]; then
-        setup_develop $HEAT_DIR/contrib/heat_keystoneclient_v2
-    fi
 
     if [[ ! -d $HEAT_CONF_DIR ]]; then
         sudo mkdir -p $HEAT_CONF_DIR
@@ -126,24 +131,22 @@
     # auth plugin setup. This should be fixed in heat.  Heat is also the only
     # service that requires the auth_uri to include a /v2.0. Remove this custom
     # setup when bug #1300246 is resolved.
-    iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
     iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
-    iniset $HEAT_CONF keystone_authtoken admin_user heat
-    iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE
-    iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR
+    if [[ "$HEAT_STANDALONE" = "True" ]]; then
+        iniset $HEAT_CONF paste_deploy flavor standalone
+        iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s"
+    else
+        iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
+        iniset $HEAT_CONF keystone_authtoken admin_user heat
+        iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+        iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+        iniset $HEAT_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE
+        iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR
+    fi
 
     # ec2authtoken
     iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
 
-    # paste_deploy
-    if [[ "$HEAT_STANDALONE" = "True" ]]; then
-        iniset $HEAT_CONF paste_deploy flavor standalone
-        iniset $HEAT_CONF DEFAULT keystone_backend heat_keystoneclient_v2.client.KeystoneClientV2
-        iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s"
-    fi
-
     # OpenStack API
     iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT
     iniset $HEAT_CONF heat_api workers "$API_WORKERS"
@@ -242,30 +245,33 @@
 
 # create_heat_accounts() - Set up common required heat accounts
 function create_heat_accounts {
-    create_service_user "heat" "admin"
+    if [[ "$HEAT_STANDALONE" != "True" ]]; then
 
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        create_service_user "heat" "admin"
 
-        local heat_service=$(get_or_create_service "heat" \
-                "orchestration" "Heat Orchestration Service")
-        get_or_create_endpoint $heat_service \
-            "$REGION_NAME" \
-            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
-            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
 
-        local heat_cfn_service=$(get_or_create_service "heat-cfn" \
-                "cloudformation" "Heat CloudFormation Service")
-        get_or_create_endpoint $heat_cfn_service \
-            "$REGION_NAME" \
-            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
-            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
-            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+            local heat_service=$(get_or_create_service "heat" \
+                    "orchestration" "Heat Orchestration Service")
+            get_or_create_endpoint $heat_service \
+                "$REGION_NAME" \
+                "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
+                "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
+
+            local heat_cfn_service=$(get_or_create_service "heat-cfn" \
+                    "cloudformation" "Heat CloudFormation Service")
+            get_or_create_endpoint $heat_cfn_service \
+                "$REGION_NAME" \
+                "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
+                "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
+        fi
+
+        # heat_stack_user role is for users created by Heat
+        get_or_create_role "heat_stack_user"
     fi
 
-    # heat_stack_user role is for users created by Heat
-    get_or_create_role "heat_stack_user"
-
     if [[ $HEAT_DEFERRED_AUTH == trusts ]]; then
         iniset $HEAT_CONF DEFAULT deferred_auth_method trusts
     fi
@@ -296,22 +302,44 @@
     fi
 }
 
-# build_heat_functional_test_image() - Build and upload functional test image
-function build_heat_functional_test_image {
-    if is_service_enabled dib; then
-        build_dib_pip_repo "$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
-        local image_name=heat-functional-tests-image
+# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects
+function build_heat_pip_mirror {
+    local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
+    local projpath proj package
 
-        # Elements path for tripleo-image-elements and heat-templates software-config
-        local elements_path=$TIE_DIR/elements:$HEAT_TEMPLATES_REPO_DIR/hot/software-config/elements
+    rm -rf $HEAT_PIP_REPO
+    mkdir -p $HEAT_PIP_REPO
 
-        disk_image_create_upload "$image_name" "$HEAT_FUNCTIONAL_IMAGE_ELEMENTS" "$elements_path"
-        iniset $TEMPEST_CONFIG orchestration image_ref $image_name
-    else
-        echo "Error, HEAT_CREATE_TEST_IMAGE=True requires dib" >&2
-        echo "Add \"enable_service dib\" to your localrc" >&2
-        exit 1
-    fi
+    echo "<html><body>" > $HEAT_PIP_REPO/index.html
+    for projpath in $project_dirs; do
+        proj=$(basename $projpath)
+        mkdir -p $HEAT_PIP_REPO/$proj
+        pushd $projpath
+        rm -rf dist
+        python setup.py sdist
+        pushd dist
+        package=$(ls *)
+        mv $package $HEAT_PIP_REPO/$proj/$package
+        popd
+
+        echo "<html><body><a href=\"$package\">$package</a></body></html>" > $HEAT_PIP_REPO/$proj/index.html
+        echo "<a href=\"$proj\">$proj</a><br/>" >> $HEAT_PIP_REPO/index.html
+
+        popd
+    done
+
+    echo "</body></html>" >> $HEAT_PIP_REPO/index.html
+
+    local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+
+    sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
+    sudo sed -e "
+        s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g;
+        s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+    " -i $heat_pip_repo_apache_conf
+    enable_apache_site heat_pip_repo
+    restart_apache_server
 }
 
 # Restore xtrace
diff --git a/lib/ironic b/lib/ironic
index bc30cdb..e446d8c 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -63,6 +63,7 @@
 IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS)
 IRONIC_ENABLED_DRIVERS=${IRONIC_ENABLED_DRIVERS:-fake,pxe_ssh,pxe_ipmitool}
 IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`}
+IRONIC_SSH_TIMEOUT=${IRONIC_SSH_TIMEOUT:-15}
 IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys}
 IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key}
 IRONIC_KEY_FILE=${IRONIC_KEY_FILE:-$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME}
@@ -343,13 +344,24 @@
     iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP
     iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
     iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
+
+    local pxe_params=""
     if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
-        local pxe_params="nofb nomodeset vga=normal console=ttyS0"
+        pxe_params+="nofb nomodeset vga=normal console=ttyS0"
         if is_deployed_with_ipa_ramdisk; then
             pxe_params+=" systemd.journald.forward_to_console=yes"
         fi
+    fi
+    # When booting with less than 1GB, we need to switch from default tmpfs
+    # to ramfs for ramdisks to decompress successfully.
+    if (is_ironic_hardware && [[ "$IRONIC_HW_NODE_RAM" -lt 1024 ]]) ||
+        (! is_ironic_hardware && [[ "$IRONIC_VM_SPECS_RAM" -lt 1024 ]]); then
+        pxe_params+=" rootfstype=ramfs"
+    fi
+    if [[ -n "$pxe_params" ]]; then
         iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params"
     fi
+
     if is_deployed_by_agent; then
         if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then
             iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY
@@ -416,6 +428,11 @@
 
 # init_ironic() - Initialize databases, etc.
 function init_ironic {
+    # Save private network as cleaning network
+    local cleaning_network_uuid
+    cleaning_network_uuid=$(neutron net-list | grep private | get_field 1)
+    iniset $IRONIC_CONF_FILE neutron cleaning_network_uuid ${cleaning_network_uuid}
+
     # (Re)create  ironic database
     recreate_database ironic
 
@@ -471,9 +488,8 @@
 
 # stop_ironic() - Stop running processes
 function stop_ironic {
-    # Kill the Ironic screen windows
-    screen -S $SCREEN_NAME -p ir-api -X kill
-    screen -S $SCREEN_NAME -p ir-cond -X kill
+    stop_process ir-api
+    stop_process ir-cond
 
     # Cleanup the WSGI files
     if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
@@ -616,7 +632,7 @@
             $node_options \
             | grep " uuid " | get_field 2)
 
-        ironic port-create --address $mac_address --node_uuid $node_id
+        ironic port-create --address $mac_address --node $node_id
 
         total_nodes=$((total_nodes+1))
         total_cpus=$((total_cpus+$ironic_node_cpu))
@@ -693,7 +709,7 @@
 
 function configure_ironic_auxiliary {
     configure_ironic_ssh_keypair
-    ironic_ssh_check $IRONIC_KEY_FILE $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
+    ironic_ssh_check $IRONIC_KEY_FILE $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME $IRONIC_SSH_TIMEOUT
 }
 
 function build_ipa_coreos_ramdisk {
diff --git a/lib/keystone b/lib/keystone
index 0968445..c9433d9 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -226,12 +226,7 @@
         iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment"
     fi
 
-    # Configure rabbitmq credentials
-    if is_service_enabled rabbit; then
-        iniset $KEYSTONE_CONF DEFAULT rabbit_userid $RABBIT_USERID
-        iniset $KEYSTONE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-        iniset $KEYSTONE_CONF DEFAULT rabbit_host $RABBIT_HOST
-    fi
+    iniset_rpc_backend keystone $KEYSTONE_CONF DEFAULT
 
     # Set the URL advertised in the ``versions`` structure returned by the '/' route
     if is_service_enabled tls-proxy; then
diff --git a/lib/lvm b/lib/lvm
index 39eed00..d0322c7 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -138,6 +138,31 @@
     fi
 }
 
+# set_lvm_filter() - Gather all devices currently configured as LVM
+# physical volumes and use them to build a global_filter entry that
+# is appended to /etc/lvm/lvm.conf.  Note this uses all PVs currently
+# in use by LVM on the system to build its filter, so LVM will scan
+# only those devices and reject everything else (the trailing
+# "r|.*|" component rejects all other devices).
+#
+# Usage: set_lvm_filter()
+function set_lvm_filter {
+    local filter_suffix='"r|.*|" ]'
+    local filter_string="global_filter = [ "
+    local pv
+    local vg
+    local line
+
+    for pv_info in $(sudo pvs --noheadings -o name); do
+        pv=$(echo -e "${pv_info}" | sed 's/ //g' | sed 's/\/dev\///g')
+        new="\"a|$pv|\", "
+        filter_string=$filter_string$new
+    done
+    filter_string=$filter_string$filter_suffix
+
+    sudo sed -i "/# global_filter = \[*\]/a\    $filter_string" /etc/lvm/lvm.conf
+    echo_summary "set lvm.conf device global_filter to: $filter_string"
+}
 
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/neutron b/lib/neutron
index a7aabc5..411c696 100755
--- a/lib/neutron
+++ b/lib/neutron
@@ -153,6 +153,7 @@
 # RHEL's support for namespaces requires using veths with ovs
 Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
 Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
+Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
 # Meta data IP
 Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
 # Allow Overlapping IP among subnets
@@ -226,6 +227,9 @@
 else
     NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
     Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+    fi
 fi
 
 
@@ -871,7 +875,7 @@
     fi
 
     if is_ssl_enabled_service "nova"; then
-        iniset $NEUTRON_CONF DEFAULT nova_ca_certificates_file "$SSL_BUNDLE_FILE"
+        iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE
     fi
 
     if is_ssl_enabled_service "neutron"; then
@@ -896,6 +900,9 @@
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
 
     _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE
 
@@ -910,6 +917,9 @@
     iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $Q_DHCP_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
 
     if ! is_service_enabled q-l3; then
         if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
@@ -943,6 +953,9 @@
     iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
 
     _neutron_setup_interface_driver $Q_L3_CONF_FILE
 
@@ -956,6 +969,9 @@
     iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
     iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
 
     # Configures keystone for metadata_agent
     # The third argument "True" sets auth_url needed to communicate with keystone
@@ -1008,6 +1024,9 @@
     # Specify the default root helper prior to agent configuration to
     # ensure that an agent's configuration can override the default
     iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE  agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
     iniset $NEUTRON_CONF DEFAULT verbose True
     iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
@@ -1045,13 +1064,15 @@
     # Configuration for neutron notifations to nova.
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
-    iniset $NEUTRON_CONF DEFAULT nova_region_name $REGION_NAME
-    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
-    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
-    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
-    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
-    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url  "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
+    iniset $NEUTRON_CONF nova auth_plugin password
+    iniset $NEUTRON_CONF nova auth_url $KEYSTONE_AUTH_URI
+    iniset $NEUTRON_CONF nova username nova
+    iniset $NEUTRON_CONF nova password $SERVICE_PASSWORD
+    iniset $NEUTRON_CONF nova user_domain_id default
+    iniset $NEUTRON_CONF nova project_name $SERVICE_TENANT_NAME
+    iniset $NEUTRON_CONF nova project_domain_id default
+    iniset $NEUTRON_CONF nova region_name $REGION_NAME
 
     # Configure plugin
     neutron_plugin_configure_service
@@ -1104,16 +1125,21 @@
     sudo chmod 0644 $Q_RR_CONF_FILE
     # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
     ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
+    ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
 
     # Set up the rootwrap sudoers for neutron
     TEMPFILE=`mktemp`
     echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
     chmod 0440 $TEMPFILE
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
 
     # Update the root_helper
     iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
 }
 
 # Configures keystone integration for neutron service and agents
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index 3b1a257..9ea7338 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -1,131 +1,10 @@
 #!/bin/bash
-#
-# Neutron NEC OpenFlow plugin
-# ---------------------------
 
-# Save trace setting
-NEC_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# This file is needed so Q_PLUGIN=nec will work.
 
-# Configuration parameters
-OFC_HOST=${OFC_HOST:-127.0.0.1}
-OFC_PORT=${OFC_PORT:-8888}
-
-OFC_API_HOST=${OFC_API_HOST:-$OFC_HOST}
-OFC_API_PORT=${OFC_API_PORT:-$OFC_PORT}
-OFC_OFP_HOST=${OFC_OFP_HOST:-$OFC_HOST}
-OFC_OFP_PORT=${OFC_OFP_PORT:-6633}
-OFC_DRIVER=${OFC_DRIVER:-trema}
-OFC_RETRY_MAX=${OFC_RETRY_MAX:-0}
-OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1}
-
-# Main logic
-# ---------------------------
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function neutron_plugin_create_nova_conf {
-    _neutron_ovs_base_configure_nova_vif_driver
-}
-
-function neutron_plugin_install_agent_packages {
-    # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose
-    # version is different from the version provided by the distribution.
-    if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then
-        echo "You need to install Open vSwitch manually."
-        return
-    fi
-    _neutron_ovs_base_install_agent_packages
-}
-
-function neutron_plugin_configure_common {
-    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec
-    Q_PLUGIN_CONF_FILENAME=nec.ini
-    Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2"
-}
-
-function neutron_plugin_configure_debug_command {
-    _neutron_ovs_base_configure_debug_command
-}
-
-function neutron_plugin_configure_dhcp_agent {
-    :
-}
-
-function neutron_plugin_configure_l3_agent {
-    _neutron_ovs_base_configure_l3_agent
-}
-
-function _quantum_plugin_setup_bridge {
-    if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then
-        return
-    fi
-    # Set up integration bridge
-    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
-    # Generate datapath ID from HOST_IP
-    local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ })
-    sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid
-    sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure
-    sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT
-    if [ -n "$OVS_INTERFACE" ]; then
-        sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE
-    fi
-    _neutron_setup_ovs_tunnels $OVS_BRIDGE
-}
-
-function neutron_plugin_configure_plugin_agent {
-    _quantum_plugin_setup_bridge
-
-    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent"
-
-    _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_configure_service {
-    iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/
-    iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST
-    iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT
-    iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER
-    iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max OFC_RETRY_MAX
-    iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval OFC_RETRY_INTERVAL
-
-    _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_setup_interface_driver {
-    local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-    iniset $conf_file DEFAULT ovs_use_veth True
-}
-
-# Utility functions
-# ---------------------------
-
-# Setup OVS tunnel manually
-function _neutron_setup_ovs_tunnels {
-    local bridge=$1
-    local id=0
-    GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP}
-    if [ -n "$GRE_REMOTE_IPS" ]; then
-        for ip in ${GRE_REMOTE_IPS//:/ }; do
-            if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then
-                continue
-            fi
-            sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \
-                set Interface gre$id type=gre options:remote_ip=$ip
-            id=`expr $id + 1`
-        done
-    fi
-}
-
+# FIXME(amotoki): This function should not be here, but unfortunately
+# devstack calls it before the external plugins are fetched
 function has_neutron_plugin_security_group {
     # 0 means True here
     return 0
 }
-
-function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
-}
-
-# Restore xtrace
-$NEC_XTRACE
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
deleted file mode 100644
index 075f013..0000000
--- a/lib/neutron_thirdparty/trema
+++ /dev/null
@@ -1,119 +0,0 @@
-#!/bin/bash
-#
-# Trema Sliceable Switch
-# ----------------------
-
-# Trema is a Full-Stack OpenFlow Framework in Ruby and C
-# https://github.com/trema/trema
-#
-# Trema Sliceable Switch is an OpenFlow controller which provides
-# virtual layer-2 network slices.
-# https://github.com/trema/apps/wiki
-
-# Trema Sliceable Switch (OpenFlow Controller)
-TREMA_APPS_REPO=${TREMA_APPS_REPO:-https://github.com/trema/apps.git}
-TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master}
-
-# Save trace setting
-TREMA3_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-TREMA_DIR=${TREMA_DIR:-$DEST/trema}
-TREMA_SS_DIR="$TREMA_DIR/apps/sliceable_switch"
-
-TREMA_DATA_DIR=${TREMA_DATA_DIR:-$DATA_DIR/trema}
-TREMA_SS_ETC_DIR=$TREMA_DATA_DIR/sliceable_switch/etc
-TREMA_SS_DB_DIR=$TREMA_DATA_DIR/sliceable_switch/db
-TREMA_SS_SCRIPT_DIR=$TREMA_DATA_DIR/sliceable_switch/script
-TREMA_TMP_DIR=$TREMA_DATA_DIR/trema
-
-TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info}
-
-TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf
-TREMA_SS_APACHE_CONFIG=$(apache_site_config_for sliceable_switch)
-
-# configure_trema - Set config files, create data dirs, etc
-function configure_trema {
-    # prepare dir
-    for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do
-        sudo mkdir -p $d
-        sudo chown -R `whoami` $d
-    done
-    sudo mkdir -p $TREMA_TMP_DIR
-}
-
-# init_trema - Initialize databases, etc.
-function init_trema {
-    local _pwd=$(pwd)
-
-    # Initialize databases for Sliceable Switch
-    cd $TREMA_SS_DIR
-    rm -f filter.db slice.db
-    ./create_tables.sh
-    mv filter.db slice.db $TREMA_SS_DB_DIR
-    # Make sure that apache cgi has write access to the databases
-    sudo chown -R www-data.www-data $TREMA_SS_DB_DIR
-    cd $_pwd
-
-    # Setup HTTP Server for sliceable_switch
-    cp $TREMA_SS_DIR/{Slice.pm,Filter.pm,config.cgi} $TREMA_SS_SCRIPT_DIR
-    sed -i -e "s|/home/sliceable_switch/db|$TREMA_SS_DB_DIR|" \
-        $TREMA_SS_SCRIPT_DIR/config.cgi
-
-    sudo cp $TREMA_SS_DIR/apache/sliceable_switch $TREMA_SS_APACHE_CONFIG
-    sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \
-        $TREMA_SS_APACHE_CONFIG
-    # TODO(gabriel-bezerra): use some function from lib/apache to enable these modules
-    sudo a2enmod rewrite actions
-    enable_apache_site sliceable_switch
-
-    cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG
-    sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \
-        -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \
-        $TREMA_SS_CONFIG
-}
-
-function gem_install {
-    [[ "$OFFLINE" = "True" ]] && return
-    [ -n "$RUBYGEMS_CMD" ] || get_gem_command
-
-    local pkg=$1
-    $RUBYGEMS_CMD list | grep "^${pkg} " && return
-    sudo $RUBYGEMS_CMD install $pkg
-}
-
-function get_gem_command {
-    # Trema requires ruby 1.8, so gem1.8 is checked first
-    RUBYGEMS_CMD=$(which gem1.8 || which gem)
-    if [ -z "$RUBYGEMS_CMD" ]; then
-        echo "Warning: ruby gems command not found."
-    fi
-}
-
-function install_trema {
-    # Trema
-    gem_install trema
-    # Sliceable Switch
-    git_clone $TREMA_APPS_REPO $TREMA_DIR/apps $TREMA_APPS_BRANCH
-    make -C $TREMA_DIR/apps/topology
-    make -C $TREMA_DIR/apps/flow_manager
-    make -C $TREMA_DIR/apps/sliceable_switch
-}
-
-function start_trema {
-    restart_apache_server
-
-    sudo LOGGING_LEVEL=$TREMA_LOG_LEVEL TREMA_TMP=$TREMA_TMP_DIR \
-        trema run -d -c $TREMA_SS_CONFIG
-}
-
-function stop_trema {
-    sudo TREMA_TMP=$TREMA_TMP_DIR trema killall
-}
-
-function check_trema {
-    :
-}
-
-# Restore xtrace
-$TREMA3_XTRACE
diff --git a/lib/nova b/lib/nova
index e9e78c7..199daee 100644
--- a/lib/nova
+++ b/lib/nova
@@ -544,6 +544,8 @@
     iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
     iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
 
+    iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
+
     if [[ "$NOVA_BACKEND" == "LVM" ]]; then
         iniset $NOVA_CONF libvirt images_type "lvm"
         iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME
diff --git a/lib/oslo b/lib/oslo
index 18cddc1..86efb60 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -36,6 +36,7 @@
 GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
 GITDIR["oslo.serialization"]=$DEST/oslo.serialization
 GITDIR["oslo.utils"]=$DEST/oslo.utils
+GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects
 GITDIR["oslo.vmware"]=$DEST/oslo.vmware
 GITDIR["pycadf"]=$DEST/pycadf
 GITDIR["stevedore"]=$DEST/stevedore
@@ -72,6 +73,7 @@
     _do_install_oslo_lib "oslo.rootwrap"
     _do_install_oslo_lib "oslo.serialization"
     _do_install_oslo_lib "oslo.utils"
+    _do_install_oslo_lib "oslo.versionedobjects"
     _do_install_oslo_lib "oslo.vmware"
     _do_install_oslo_lib "pycadf"
     _do_install_oslo_lib "stevedore"
diff --git a/lib/rpc_backend b/lib/rpc_backend
index ff22bbf..a399d17 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -233,6 +233,15 @@
     fi
 }
 
+# builds transport url string
+function get_transport_url {
+    if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then
+        echo "qpid://$QPID_USERNAME:$QPID_PASSWORD@$QPID_HOST:5672/"
+    elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
+        echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/"
+    fi
+}
+
 # iniset cofiguration
 function iniset_rpc_backend {
     local package=$1
diff --git a/lib/sahara b/lib/sahara
index 9b2e9c4..709e90e 100644
--- a/lib/sahara
+++ b/lib/sahara
@@ -111,9 +111,6 @@
         cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR
     fi
 
-    # Copy over sahara configuration file and configure common parameters.
-    cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE
-
     # Create auth cache dir
     sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
     sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR
@@ -139,14 +136,12 @@
 
     if is_service_enabled neutron; then
         iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
-        iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true
 
         if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then
             iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE
         fi
     else
         iniset $SAHARA_CONF_FILE DEFAULT use_neutron false
-        iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips false
     fi
 
     if is_service_enabled heat; then
diff --git a/lib/swift b/lib/swift
index 8a96615..3decd2f 100644
--- a/lib/swift
+++ b/lib/swift
@@ -378,7 +378,11 @@
     # Configure Ceilometer
     if is_service_enabled ceilometer; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
-        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory"
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift"
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_transport_url)
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging"
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications"
         SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
     fi
 
@@ -441,16 +445,15 @@
 
     if is_service_enabled swift3; then
         cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
-# NOTE(chmou): s3token middleware is not updated yet to use only
-# username and password.
 [filter:s3token]
 paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory
 auth_port = ${KEYSTONE_AUTH_PORT}
 auth_host = ${KEYSTONE_AUTH_HOST}
 auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
 cafile = ${SSL_BUNDLE_FILE}
-auth_token = ${SERVICE_TOKEN}
-admin_token = ${SERVICE_TOKEN}
+admin_user = swift
+admin_tenant_name = ${SERVICE_TENANT_NAME}
+admin_password = ${SERVICE_PASSWORD}
 
 [filter:swift3]
 use = egg:swift3#swift3
diff --git a/lib/tempest b/lib/tempest
index f856ce0..9b44f47 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -66,7 +66,7 @@
 # This must be False on stable branches, as master tempest
 # deps do not match stable branch deps. Set this to True to
 # have tempest installed in devstack by default.
-INSTALL_TEMPEST=${INSTALL_TEMPEST:-"False"}
+INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"}
 
 
 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
@@ -315,6 +315,9 @@
 
     # Auth
     iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
+    if [[ "$TEMPEST_AUTH_VERSION" == "v3" ]]; then
+        iniset $TEMPEST_CONFIG auth tempest_roles "Member"
+    fi
 
     # Compute
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
diff --git a/lib/zaqar b/lib/zaqar
index c9321b9..79b4c5a 100644
--- a/lib/zaqar
+++ b/lib/zaqar
@@ -37,8 +37,6 @@
 ZAQARCLIENT_DIR=$DEST/python-zaqarclient
 ZAQAR_CONF_DIR=/etc/zaqar
 ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf
-ZAQAR_API_LOG_DIR=/var/log/zaqar
-ZAQAR_API_LOG_FILE=$ZAQAR_API_LOG_DIR/queues.log
 ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar}
 
 # Support potential entry-points console scripts
@@ -110,14 +108,10 @@
     [ ! -d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR
     sudo chown $USER $ZAQAR_CONF_DIR
 
-    [ ! -d $ZAQAR_API_LOG_DIR ] &&  sudo mkdir -m 755 -p $ZAQAR_API_LOG_DIR
-    sudo chown $USER $ZAQAR_API_LOG_DIR
-
     iniset $ZAQAR_CONF DEFAULT debug True
     iniset $ZAQAR_CONF DEFAULT verbose True
     iniset $ZAQAR_CONF DEFAULT admin_mode True
     iniset $ZAQAR_CONF DEFAULT use_syslog $SYSLOG
-    iniset $ZAQAR_CONF DEFAULT log_file $ZAQAR_API_LOG_FILE
     iniset $ZAQAR_CONF 'drivers:transport:wsgi' bind $ZAQAR_SERVICE_HOST
 
     configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 239d6b9..f53c7f2 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -77,6 +77,7 @@
 }
 
 function install_elasticsearch {
+    pip_install elasticsearch
     if is_package_installed elasticsearch; then
         echo "Note: elasticsearch was already installed."
         return
diff --git a/samples/local.conf b/samples/local.conf
index 9e0b540..63000b6 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -3,7 +3,7 @@
 # NOTE: Copy this file to the root ``devstack`` directory for it to
 # work properly.
 
-# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``.
+# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``.
 # This gives it the ability to override any variables set in ``stackrc``.
 # Also, most of the settings in ``stack.sh`` are written to only be set if no
 # value has already been set; this lets ``local.conf`` effectively override the
@@ -98,4 +98,4 @@
 # -------
 
 # Install the tempest test suite
-enable_service tempest
\ No newline at end of file
+enable_service tempest
diff --git a/stack.sh b/stack.sh
index 2ac7dfa..d83952a 100755
--- a/stack.sh
+++ b/stack.sh
@@ -21,6 +21,13 @@
 
 # Learn more and get the most recent version at http://devstack.org
 
+# check if someone has invoked with "sh"
+if [[ "${POSIXLY_CORRECT}" == "y" ]]; then
+    echo "You appear to be running bash in POSIX compatibility mode."
+    echo "devstack uses bash features. \"./stack.sh\" should do the right thing"
+    exit 1
+fi
+
 # Make sure custom grep options don't get in the way
 unset GREP_OPTIONS
 
@@ -92,7 +99,7 @@
 source $TOP_DIR/functions
 
 # Import config functions
-source $TOP_DIR/lib/config
+source $TOP_DIR/inc/meta-config
 
 # Import 'public' stack.sh functions
 source $TOP_DIR/lib/stack
@@ -739,6 +746,9 @@
 fi
 
 if is_service_enabled s-proxy; then
+    if is_service_enabled ceilometer; then
+        install_ceilometermiddleware
+    fi
     stack_install_service swift
     configure_swift
 
@@ -968,7 +978,7 @@
         create_swift_accounts
     fi
 
-    if is_service_enabled heat && [[ "$HEAT_STANDALONE" != "True" ]]; then
+    if is_service_enabled heat; then
         create_heat_accounts
     fi
 
@@ -1227,9 +1237,9 @@
     init_heat
     echo_summary "Starting Heat"
     start_heat
-    if [ "$HEAT_CREATE_TEST_IMAGE" = "True" ]; then
-        echo_summary "Building Heat functional test image"
-        build_heat_functional_test_image
+    if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then
+        echo_summary "Building Heat pip mirror"
+        build_heat_pip_mirror
     fi
 fi
 
@@ -1306,6 +1316,15 @@
 # Prepare bash completion for OSC
 openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
 
+# If cinder is configured, set global_filter for PV devices
+if is_service_enabled cinder; then
+    if is_ubuntu; then
+        echo_summary "Configuring lvm.conf global device filter"
+        set_lvm_filter
+    else
+        echo_summary "Skip setting lvm filters for non Ubuntu systems"
+    fi
+fi
 
 # Fin
 # ===
diff --git a/stackrc b/stackrc
index cb044b8..02b12a3 100644
--- a/stackrc
+++ b/stackrc
@@ -358,6 +358,10 @@
 GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git}
 GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master}
 
+# oslo.versionedobjects
+GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git}
+GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master}
+
 # oslo.vmware
 GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git}
 GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master}
@@ -419,14 +423,10 @@
 
 ##################
 #
-#  TripleO Components
+#  TripleO / Heat Agent Components
 #
 ##################
 
-# diskimage-builder
-DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-DIB_BRANCH=${DIB_BRANCH:-master}
-
 # os-apply-config configuration template tool
 OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
 OAC_BRANCH=${OAC_BRANCH:-master}
@@ -439,10 +439,6 @@
 ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
 ORC_BRANCH=${ORC_BRANCH:-master}
 
-# Tripleo elements for diskimage-builder images
-TIE_REPO=${TIE_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git}
-TIE_BRANCH=${TIE_BRANCH:-master}
-
 #################
 #
 #  3rd Party Components (non pip installable)
diff --git a/tests/test_ini.sh b/tests/test_ini_config.sh
similarity index 98%
rename from tests/test_ini.sh
rename to tests/test_ini_config.sh
index 106cc95..4a0ae33 100755
--- a/tests/test_ini.sh
+++ b/tests/test_ini_config.sh
@@ -4,8 +4,8 @@
 
 TOP=$(cd $(dirname "$0")/.. && pwd)
 
-# Import common functions
-source $TOP/functions
+# Import config functions
+source $TOP/inc/ini-config
 
 
 echo "Testing INI functions"
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 472b0ea..0bec584 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -34,8 +34,8 @@
 ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib"
 ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
-ALL_LIBS+=" oslo.vmware keystonemiddleware oslo.serialization"
-ALL_LIBS+=" python-saharaclient django_openstack_auth"
+ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
+ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth"
 ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
 ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
diff --git a/tests/test_config.sh b/tests/test_meta_config.sh
similarity index 98%
rename from tests/test_config.sh
rename to tests/test_meta_config.sh
index 3252104..9d65280 100755
--- a/tests/test_config.sh
+++ b/tests/test_meta_config.sh
@@ -4,11 +4,9 @@
 
 TOP=$(cd $(dirname "$0")/.. && pwd)
 
-# Import common functions
-source $TOP/functions
-
 # Import config functions
-source $TOP/lib/config
+source $TOP/inc/ini-config
+source $TOP/inc/meta-config
 
 # check_result() tests and reports the result values
 # check_result "actual" "expected"
diff --git a/tools/build_docs.sh b/tools/build_docs.sh
index 929d1e0..2aa0a0a 100755
--- a/tools/build_docs.sh
+++ b/tools/build_docs.sh
@@ -81,7 +81,7 @@
     mkdir -p $FQ_HTML_BUILD/`dirname $f`;
     $SHOCCO $f > $FQ_HTML_BUILD/$f.html
 done
-for f in $(find functions functions-common lib samples -type f -name \*); do
+for f in $(find functions functions-common inc lib pkg samples -type f -name \*); do
     echo $f
     FILES+="$f "
     mkdir -p $FQ_HTML_BUILD/`dirname $f`;
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 73d0947..b7b40c7 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -42,9 +42,21 @@
 
 
 function install_get_pip {
-    if [[ ! -r $LOCAL_PIP ]]; then
-        curl --retry 6 --retry-delay 5 -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+    # the openstack gate and others put a cached version of get-pip.py
+    # for this to find, explicitly to avoid download issues.
+    #
+    # However, if devstack *did* download the file, we want to check
+    # for updates; people can leave their stacks around for a long
+    # time and in the meantime pip might get upgraded.
+    #
+    # Thus we use curl's "-z" feature to always check the modified
+    # since and only download if a new version is out -- but only if
+    # it seems we downloaded the file originally.
+    if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
+        curl --retry 6 --retry-delay 5 \
+            -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \
             die $LINENO "Download of get-pip.py failed"
+        touch $LOCAL_PIP.downloaded
     fi
     sudo -H -E python $LOCAL_PIP
 }
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 082c27e..b49347e 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -227,7 +227,7 @@
         -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \
         -l "$GUEST_NAME"
 
-    set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB"
+    set_vm_memory "$GUEST_NAME" "1024"
 
     xe vm-start vm="$GUEST_NAME"
 
diff --git a/tox.ini b/tox.ini
index a958ae7..bc84928 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,6 +20,7 @@
           -name \*.sh -or                     \
           -name \*rc -or                      \
           -name functions\* -or               \
+          -wholename \*/inc/\*                \ # /inc files and
           -wholename \*/lib/\*                \ # /lib files are shell, but
          \)                                   \ #   have no extension
          -print0 | xargs -0 bashate -v"