Merge "Update dependencies on openSUSE"
diff --git a/.mailmap b/.mailmap
new file mode 100644
index 0000000..29be995
--- /dev/null
+++ b/.mailmap
@@ -0,0 +1,6 @@
+# Format is:
+# <preferred e-mail> <other e-mail 1>
+# <preferred e-mail> <other e-mail 2>
+Jiajun Liu <jiajun@unitedstack.com> <iamljj@gmail.com>
+Jian Wen <jian.wen@canonical.com> <wenjianhn@gmail.com>
+Joe Gordon <joe.gordon0@gmail.com> <jogo@cloudscaling.com>
diff --git a/AUTHORS b/AUTHORS
index 718a760..c6b40d8 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -26,6 +26,7 @@
 Jason Cannavale <jason.cannavale@rackspace.com>
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
+Jian Wen <jian.wen@canonical.com>
 Joe Gordon <jogo@cloudscaling.com>
 Johannes Erdfelt <johannes.erdfelt@rackspace.com>
 John Postlethwait <john.postlethwait@nebula.com>
@@ -37,6 +38,7 @@
 Osamu Habuka <xiu.yushen@gmail.com>
 Russell Bryant <rbryant@redhat.com>
 Scott Moser <smoser@ubuntu.com>
+Sean Dague <sdague@linux.vnet.ibm.com>
 Sumit Naiksatam <sumitnaiksatam@gmail.com>
 Thierry Carrez <thierry@openstack.org>
 Todd Willey <xtoddx@gmail.com>
diff --git a/HACKING.rst b/HACKING.rst
index 6ad8c7e..3fef950 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -7,8 +7,8 @@
 
 DevStack is written in POSIX shell script.  This choice was made because
 it best illustrates the configuration steps that this implementation takes
-on setting up and interacting with OpenStack components.  DevStack specifies
-BASH and is compatible with Bash 3.
+on setting up and interacting with OpenStack components.  DevStack specifically
+uses Bash and is compatible with Bash 3.
 
 DevStack's official repository is located on GitHub at
 https://github.com/openstack-dev/devstack.git.  Besides the master branch that
@@ -30,9 +30,17 @@
 generally useful shell functions and is used by a number of the scripts in
 DevStack.
 
+The ``lib`` directory contains sub-scripts for projects or packages that ``stack.sh``
+sources to perform much of the work related to those projects.  These sub-scripts
+contain configuration defaults and functions to configure, start and stop the project
+or package.  These variables and functions are also used by related projects,
+such as Grenade, to manage a DevStack installation.
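+
+For example, ``stack.sh`` drives each project sub-script through a small set of
+entry points; the names below follow the pattern used by the existing ``lib/*``
+files and are shown only as an illustrative sketch::
+
+    source $TOP_DIR/lib/cinder
+    install_cinder      # fetch and install the code
+    configure_cinder    # write the configuration files
+    init_cinder         # create databases and other initial state
+    start_cinder        # launch the services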
+
 A number of additional scripts can be found in the ``tools`` directory that may
-be useful in setting up special-case uses of DevStack. These include: bare metal
-deployment, ramdisk deployment and Jenkins integration.
+be useful in supporting DevStack installations.  Of particular note are ``info.sh``,
+which collects and reports information about the installed system, and
+``install_prereqs.sh``, which handles installation of the prerequisite packages
+for DevStack.  It is suitable, for example, for pre-loading a system before
+making a snapshot.
 
 
 Scripts
@@ -63,8 +71,8 @@
     source $TOP_DIR/openrc
 
 ``stack.sh`` is a rather large monolithic script that flows through from beginning
-to end.  The process of breaking it down into project-level sub-scripts is nearly
-complete and should make ``stack.sh`` easier to read and manage.
+to end.  It has been broken down into project-specific sub-scripts (as noted above)
+located in ``lib`` to make ``stack.sh`` more manageable and to promote code reuse.
 
 These library sub-scripts have a number of fixed entry points, some of which may
 just be stubs.  These entry points will be called by ``stack.sh`` in the
@@ -112,6 +120,7 @@
 ``FOO=${FOO:-baz}``); if they did then they can already be changed in ``localrc``
 and can stay in the project file.
 
+
 Documentation
 -------------
 
@@ -165,7 +174,7 @@
   the script on non-zero exit codes::
 
     # This script exits on an error so that errors don't compound and you see
-    # only the first error that occured.
+    # only the first error that occurred.
     set -o errexit
 
     # Print the commands being run so that we can see the command that triggers
diff --git a/README.md b/README.md
index d8538c2..905a54d 100644
--- a/README.md
+++ b/README.md
@@ -120,14 +120,29 @@
     # Optional, to enable tempest configuration as part of devstack
     enable_service tempest
 
-Then run stack.sh as normal.
+Then run `stack.sh` as normal.
+
+DevStack supports adding specific Quantum configuration flags to both the Open vSwitch and LinuxBridge plugin configuration files. To use this feature, the following variables are defined and can be set in your `localrc` file:
+
+    Variable Name             Plugin Config File Section Modified
+    -------------------------------------------------------------------------------------
+    Q_SRV_EXTRA_OPTS          `OVS` (for Open vSwitch) or `LINUX_BRIDGE` (for LinuxBridge)
+    Q_AGENT_EXTRA_AGENT_OPTS  AGENT
+    Q_AGENT_EXTRA_SRV_OPTS    `OVS` (for Open vSwitch) or `LINUX_BRIDGE` (for LinuxBridge)
+
+An example of using the variables in your `localrc` is below:
+
+    Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472)
+    Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan)
+
+# Tempest
 
 If tempest has been successfully configured, a basic set of smoke tests can be run as follows:
 
     $ cd /opt/stack/tempest
     $ nosetests tempest/tests/network/test_network_basic_ops.py
 
-Multi-Node Setup
+# Multi-Node Setup
 
 A more interesting setup involves running multiple compute nodes, with Quantum networks connecting VMs on different compute nodes.
 You should run at least one "controller node", which should have a `stackrc` that includes at least:
@@ -151,3 +166,24 @@
     MYSQL_HOST=$SERVICE_HOST
     RABBIT_HOST=$SERVICE_HOST
     Q_HOST=$SERVICE_HOST
+    MATCHMAKER_REDIS_HOST=$SERVICE_HOST
+
+# Cells
+
+Cells is a new scaling option; the full spec is at http://wiki.openstack.org/blueprint-nova-compute-cells.
+
+To set up a cells environment, add the following to your `localrc`:
+
+    enable_service n-cell
+    enable_service n-api-meta
+    MULTI_HOST=True
+
+    # The following have not been tested with cells; they may or may not work.
+    disable_service n-obj
+    disable_service cinder
+    disable_service c-sch
+    disable_service c-api
+    disable_service c-vol
+    disable_service n-xvnc
+
+Be aware that some features are currently missing in cells, notably security groups.
diff --git a/clean.sh b/clean.sh
index cf24f27..758947a 100755
--- a/clean.sh
+++ b/clean.sh
@@ -19,7 +19,9 @@
 source $TOP_DIR/stackrc
 
 # Get the variables that are set in stack.sh
-source $TOP_DIR/.stackenv
+if [[ -r $TOP_DIR/.stackenv ]]; then
+    source $TOP_DIR/.stackenv
+fi
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
 # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -61,7 +63,7 @@
 cleanup_quantum
 cleanup_swift
 
-# cinder doesn't clean up the volume group as it might be used elsewhere...
+# cinder doesn't always clean up the volume group as it might be used elsewhere...
 # clean it up if it is a loop device
 VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}')
 if [[ -n "$VG_DEV" ]]; then
@@ -88,4 +90,4 @@
 # FIXED_IP_ADDR in br100
 
 # Clean up files
-#rm -f .stackenv
+rm -f $TOP_DIR/.stackenv
diff --git a/exercise.sh b/exercise.sh
index 3516738..ce694fb 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -23,7 +23,7 @@
 
 EXERCISE_DIR=$TOP_DIR/exercises
 
-if [ -z "${basenames}" ] ; then
+if [[ -z "${basenames}" ]]; then
     # Locate the scripts we should run
     basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
 else
@@ -38,7 +38,7 @@
 
 # Loop over each possible script (by basename)
 for script in $basenames; do
-    if [[ ,$SKIP_EXERCISES, =~ ,$script, ]] ; then
+    if [[ ,$SKIP_EXERCISES, =~ ,$script, ]]; then
         skips="$skips $script"
     else
         echo "====================================================================="
@@ -48,7 +48,7 @@
         exitcode=$?
         if [[ $exitcode == 55 ]]; then
             skips="$skips $script"
-        elif [[ $exitcode -ne 0 ]] ; then
+        elif [[ $exitcode -ne 0 ]]; then
             failures="$failures $script"
         else
             passes="$passes $script"
@@ -69,6 +69,6 @@
 done
 echo "====================================================================="
 
-if [ -n "$failures" ] ; then
+if [[ -n "$failures" ]]; then
     exit 1
 fi
diff --git a/exerciserc b/exerciserc
index c26ec2c..9105fe3 100644
--- a/exerciserc
+++ b/exerciserc
@@ -21,10 +21,10 @@
 # Max time to wait for a vm to terminate
 export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
 
-# Max time to wait for a euca-volume command to propogate
+# Max time to wait for a euca-volume command to propagate
 export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30}
 
-# Max time to wait for a euca-delete command to propogate
+# Max time to wait for a euca-delete command to propagate
 export VOLUME_DELETE_TIMEOUT=${SNAPSHOT_DELETE_TIMEOUT:-60}
 
 # The size of the volume we want to boot from; some storage back-ends
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 14d0049..358b3d2 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -11,7 +11,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index dce36aa..b83678a 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -10,7 +10,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 1e92500..1e68042 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -2,14 +2,14 @@
 
 # **client-args.sh**
 
-# Test OpenStack client authentication aguemnts handling
+# Test OpenStack client authentication arguments handling
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
@@ -67,7 +67,7 @@
 # Keystone client
 # ---------------
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
         STATUS_KEYSTONE="Skipped"
     else
         echo -e "\nTest Keystone"
@@ -84,7 +84,7 @@
 # -----------
 
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
         STATUS_NOVA="Skipped"
         STATUS_EC2="Skipped"
     else
@@ -103,7 +103,7 @@
 # -------------
 
 if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
         STATUS_CINDER="Skipped"
     else
         echo -e "\nTest Cinder"
@@ -120,7 +120,7 @@
 # -------------
 
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
         STATUS_GLANCE="Skipped"
     else
         echo -e "\nTest Glance"
@@ -137,7 +137,7 @@
 # ------------
 
 if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
         STATUS_SWIFT="Skipped"
     else
         echo -e "\nTest Swift"
@@ -152,8 +152,9 @@
 
 set +o xtrace
 
+
 # Results
-# -------
+# =======
 
 function report() {
     if [[ -n "$2" ]]; then
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index dd8e56e..6c6fe12 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -2,14 +2,14 @@
 
 # **client-env.sh**
 
-# Test OpenStack client enviroment variable handling
+# Test OpenStack client environment variable handling
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
@@ -60,7 +60,7 @@
 # Keystone client
 # ---------------
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "key" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "key" ]]; then
         STATUS_KEYSTONE="Skipped"
     else
         echo -e "\nTest Keystone"
@@ -77,7 +77,7 @@
 # -----------
 
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "n-api" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "n-api" ]]; then
         STATUS_NOVA="Skipped"
         STATUS_EC2="Skipped"
     else
@@ -111,7 +111,7 @@
 # -------------
 
 if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "c-api" ]]; then
         STATUS_CINDER="Skipped"
     else
         echo -e "\nTest Cinder"
@@ -128,7 +128,7 @@
 # -------------
 
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "g-api" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "g-api" ]]; then
         STATUS_GLANCE="Skipped"
     else
         echo -e "\nTest Glance"
@@ -146,7 +146,7 @@
 
 
 if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-    if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
+    if [[ "$SKIP_EXERCISES" =~ "swift" ]]; then
         STATUS_SWIFT="Skipped"
     else
         echo -e "\nTest Swift"
@@ -161,8 +161,9 @@
 
 set +o xtrace
 
+
 # Results
-# -------
+# =======
 
 function report() {
     if [[ -n "$2" ]]; then
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 50d4744..ac21b6b 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -10,7 +10,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
@@ -85,18 +85,18 @@
    die_if_not_set $LINENO VOLUME "Failure to create volume"
 
    # Test that volume has been created
-   VOLUME=`euca-describe-volumes | cut -f2`
+   VOLUME=`euca-describe-volumes $VOLUME | cut -f2`
    die_if_not_set $LINENO VOLUME "Failure to get volume"
 
    # Test volume has become available
    if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-       die $LINENO "volume didnt become available within $RUNNING_TIMEOUT seconds"
+       die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds"
    fi
 
    # Attach volume to an instance
    euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \
        die $LINENO "Failure attaching volume $VOLUME to $INSTANCE"
-   if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then
+   if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -A 1 in-use | grep -q attach; do sleep 1; done"; then
        die $LINENO "Could not attach $VOLUME to $INSTANCE"
    fi
 
@@ -162,7 +162,7 @@
 # case changed with bug/836978. Requesting the status of an invalid instance
 # will now return an error message including the instance id, so we need to
 # filter that out.
-if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then
+if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve '\(InstanceNotFound\|InvalidInstanceID\.NotFound\)' | grep -q $INSTANCE; do sleep 1; done"; then
     die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
 fi
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index ad11a6b..b741efb 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -9,7 +9,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
index 5d778c9..d62ad52 100755
--- a/exercises/horizon.sh
+++ b/exercises/horizon.sh
@@ -9,7 +9,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index a1fb2ad..abec5e4 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -10,7 +10,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 
 set -o errtrace
 
@@ -43,13 +43,12 @@
 # Import configuration
 source $TOP_DIR/openrc
 
-# If quantum is not enabled we exit with exitcode 55 which mean
-# exercise is skipped.
-is_service_enabled quantum && is_service_enabled q-agt && is_service_enabled q-dhcp || exit 55
-
-# Import quantum fucntions
+# Import quantum functions
 source $TOP_DIR/lib/quantum
 
+# If quantum is not enabled we exit with exitcode 55, which means the exercise is skipped.
+quantum_plugin_check_adv_test_requirements || exit 55
+
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
@@ -330,7 +329,7 @@
 }
 
 function delete_networks {
-   foreach_tenant_net 'delete_network ${%TENANT%_NAME} ${%NUM%}'
+   foreach_tenant_net 'delete_network ${%TENANT%_NAME} %NUM%'
    #TODO(nati) add secuirty group check after it is implemented
    # source $TOP_DIR/openrc demo1 demo1
    # nova secgroup-delete-rule default icmp -1 -1 0.0.0.0/0
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index b73afdf..6b67291 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -9,7 +9,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
diff --git a/exercises/swift.sh b/exercises/swift.sh
index c4ec3e9..b9f1b56 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -9,7 +9,7 @@
 echo "*********************************************************************"
 
 # This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
+# only the first error that occurred.
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index fb98471..af880c4 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -17,6 +17,7 @@
 
     <Directory %HORIZON_DIR%/>
         Options Indexes FollowSymLinks MultiViews
+        %HORIZON_REQUIRE%
         AllowOverride None
         Order allow,deny
         allow from all
diff --git a/files/apts/cinder b/files/apts/cinder
index 5db06ea..c45b97f 100644
--- a/files/apts/cinder
+++ b/files/apts/cinder
@@ -1,2 +1,3 @@
 tgt
 lvm2
+qemu-utils
diff --git a/files/apts/general b/files/apts/general
index a1fcf3c..ec6dd0d 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -1,5 +1,4 @@
 bridge-utils
-pep8
 pylint
 python-pip
 screen
diff --git a/files/apts/horizon b/files/apts/horizon
index 2c2faf1..e1ce85f 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -11,7 +11,6 @@
 python-webob
 python-kombu
 pylint
-pep8
 python-eventlet
 python-nose
 python-sphinx
diff --git a/files/apts/n-api b/files/apts/n-api
index 0f08daa..e0e5e7f 100644
--- a/files/apts/n-api
+++ b/files/apts/n-api
@@ -1 +1,2 @@
 python-dateutil
+msgpack-python
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
index ad2d6d7..88e0144 100644
--- a/files/apts/n-cpu
+++ b/files/apts/n-cpu
@@ -1,7 +1,8 @@
 # Stuff for diablo volumes
+nbd-client
 lvm2
 open-iscsi
-open-iscsi-utils
+open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise
 genisoimage
 sysfsutils
 sg3-utils
diff --git a/files/apts/nova b/files/apts/nova
index f4615c4..6a7ef74 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -1,9 +1,8 @@
 dnsmasq-base
-dnsmasq-utils # for dhcp_release only available in dist:oneiric,precise,quantal
+dnsmasq-utils # for dhcp_release
 kpartx
 parted
-arping # only available in dist:natty
-iputils-arping # only available in dist:oneiric
+iputils-arping
 mysql-server # NOPRIME
 python-mysqldb
 python-xattr # needed for glance which is needed for nova --- this shouldn't be here
@@ -13,7 +12,8 @@
 ebtables
 sqlite3
 sudo
-kvm
+kvm # NOPRIME
+qemu # dist:wheezy,jessie NOPRIME
 libvirt-bin # NOPRIME
 libjs-jquery-tablesorter # Needed for coverage html reports
 vlan
@@ -27,7 +27,7 @@
 python-migrate
 python-gflags
 python-greenlet
-python-libvirt
+python-libvirt # NOPRIME
 python-libxml2
 python-routes
 python-netaddr
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 72b5b1e..ccac880 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -5,9 +5,9 @@
 # Tenant               User       Roles
 # ------------------------------------------------------------------
 # service              glance     admin
-# service              swift      admin        # if enabled
-# service              heat       admin        # if enabled
-# service              ceilometer admin        # if enabled
+# service              swift      service        # if enabled
+# service              heat       service        # if enabled
+# service              ceilometer admin          # if enabled
 # Tempest Only:
 # alt_demo             alt_demo  Member
 #
@@ -47,6 +47,8 @@
 # but ResellerAdmin is needed for a user to act as any tenant. The name of this
 # role is also configurable in swift-proxy.conf
 RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
+# Service role, so service users do not have to be admins
+SERVICE_ROLE=$(get_id keystone role-create --name=service)
 
 
 # Services
@@ -70,7 +72,7 @@
                                               --email=heat@example.com)
     keystone user-role-add --tenant_id $SERVICE_TENANT \
                            --user_id $HEAT_USER \
-                           --role_id $ADMIN_ROLE
+                           --role_id $SERVICE_ROLE
     # heat_stack_user role is for users created by Heat
     keystone role-create --name heat_stack_user
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
@@ -133,7 +135,7 @@
     keystone user-role-add \
         --tenant_id $SERVICE_TENANT \
         --user_id $SWIFT_USER \
-        --role_id $ADMIN_ROLE
+        --role_id $SERVICE_ROLE
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         SWIFT_SERVICE=$(get_id keystone service-create \
             --name=swift \
diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif
index 2b76372..02caf3f 100644
--- a/files/ldap/openstack.ldif
+++ b/files/ldap/openstack.ldif
@@ -4,9 +4,9 @@
 objectClass: organizationalUnit
 ou: openstack
 
-dn: ou=Groups,dc=openstack,dc=org
+dn: ou=UserGroups,dc=openstack,dc=org
 objectClass: organizationalUnit
-ou: Groups
+ou: UserGroups
 
 dn: ou=Users,dc=openstack,dc=org
 objectClass: organizationalUnit
@@ -20,10 +20,6 @@
 objectClass: organizationalUnit
 ou: Projects
 
-dn: ou=Domains,dc=openstack,dc=org
-objectClass: organizationalUnit
-ou: Domains
-
 dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org
 objectClass: organizationalRole
 ou: _member_
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
index e5b4727..61b9f25 100644
--- a/files/rpms-suse/cinder
+++ b/files/rpms-suse/cinder
@@ -1,2 +1,3 @@
 lvm2
 tgt
+qemu-img
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index b8ceeb7..93711ff 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -8,7 +8,6 @@
 psmisc
 python-cmd2 # dist:opensuse-12.3
 python-netaddr
-python-pep8
 python-pip
 python-pylint
 python-unittest2
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index 7e46ffe..405fb7a 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -17,7 +17,6 @@
 python-mox
 python-netaddr
 python-nose
-python-pep8
 python-pylint
 python-sqlalchemy-migrate
 python-xattr
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 8a28e7d..edb1a8a 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -7,14 +7,15 @@
 iptables
 iputils
 kpartx
-kvm
+kvm # NOPRIME
 # qemu as fallback if kvm cannot be used
-qemu
+qemu # NOPRIME
 libvirt # NOPRIME
-libvirt-python
+libvirt-python # NOPRIME
 libxml2-python
 mysql-community-server # NOPRIME
 parted
+polkit
 python-M2Crypto
 python-m2crypto # dist:sle11sp2
 python-Paste
diff --git a/files/rpms/cinder b/files/rpms/cinder
index df861aa..19dedff 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,2 +1,3 @@
 lvm2
 scsi-target-utils
+qemu-img
diff --git a/files/rpms/general b/files/rpms/general
index fc3412b..5cb3e28 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,14 +1,19 @@
 bridge-utils
 curl
+dbus
 euca2ools # only for testing client
+gcc # dist:rhel6 [2]
 git-core
 openssh-server
 openssl
+openssl-devel # to rebuild pyOpenSSL if needed
+libxml2-devel # dist:rhel6 [2]
+libxslt-devel # dist:rhel6 [2]
 psmisc
 pylint
 python-netaddr
-python-pep8
 python-pip
+python-prettytable # dist:rhel6 [1]
 python-unittest2
 python-virtualenv
 screen
@@ -16,3 +21,12 @@
 tcpdump
 unzip
 wget
+
+# [1] : some of the installed tools have unversioned dependencies on this,
+# but others have versioned ones (<=0.7).  So if a later version (0.7.1)
+# gets installed in response to an unversioned dependency, it breaks.
+# This pre-installs a compatible 0.6(ish) version from RHEL.
+
+# [2] : the RHEL6 rpm version of python-lxml is old and has to be
+# removed.  Several tools rely on it, so we install the dependencies
+# pip needs to build it here (see tools/install_prereqs.sh)
diff --git a/files/rpms/glance b/files/rpms/glance
index eff6c2c..0f113ea 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,9 +1,10 @@
+gcc
 libxml2-devel
 python-argparse
 python-devel
 python-eventlet
 python-greenlet
-python-paste-deploy
+python-paste-deploy #dist:f16,f17,f18,f19
 python-routes
 python-sqlalchemy
 python-wsgiref
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 12f75ba..b844d98 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -3,6 +3,7 @@
 gcc
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
+nodejs # NOPRIME
 pylint
 python-anyjson
 python-BeautifulSoup
@@ -17,9 +18,8 @@
 python-mox
 python-netaddr
 python-nose
-python-paste
-python-paste-deploy
-python-pep8
+python-paste        #dist:f16,f17,f18,f19
+python-paste-deploy #dist:f16,f17,f18,f19
 python-routes
 python-sphinx
 python-sqlalchemy
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 59868c7..33a4f47 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,11 +1,13 @@
 python-greenlet
-python-lxml
-python-paste
-python-paste-deploy
-python-paste-script
+python-lxml         #dist:f16,f17,f18,f19
+python-paste        #dist:f16,f17,f18,f19
+python-paste-deploy #dist:f16,f17,f18,f19
+python-paste-script #dist:f16,f17,f18,f19
 python-routes
-python-setuptools
+python-setuptools   #dist:f16,f17,f18,f19
 python-sqlalchemy
 python-sqlite2
 python-webob
 sqlite
+
+# Deps installed via pip for RHEL
\ No newline at end of file
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 149672a..e4fdaf4 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -4,3 +4,4 @@
 genisoimage
 sysfsutils
 sg3_utils
+python-libguestfs
diff --git a/files/rpms/nova b/files/rpms/nova
index 7ff926b..8d8a0b8 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,14 +7,15 @@
 iptables
 iputils
 kpartx
-kvm
+kvm # NOPRIME
 libvirt-bin # NOPRIME
-libvirt-python
+libvirt-python # NOPRIME
 libxml2-python
 numpy # needed by websockify for spice console
 m2crypto
 mysql-server # NOPRIME
 parted
+polkit
 python-boto
 python-carrot
 python-cheetah
@@ -28,16 +29,18 @@
 python-migrate
 python-mox
 python-netaddr
-python-paramiko
-python-paste
-python-paste-deploy
+python-paramiko # dist:f16,f17,f18,f19
+# ^ on RHEL, this brings in python-crypto, which conflicts with the
+# version from pip that we need
+python-paste        # dist:f16,f17,f18,f19
+python-paste-deploy # dist:f16,f17,f18,f19
 python-qpid
 python-routes
 python-sqlalchemy
 python-suds
 python-tempita
 rabbitmq-server # NOPRIME
-qpid-cpp-server-daemon # NOPRIME
+qpid-cpp-server # NOPRIME
 sqlite
 sudo
 vconfig
diff --git a/files/rpms/quantum b/files/rpms/quantum
index 05398fc..6a8fd36 100644
--- a/files/rpms/quantum
+++ b/files/rpms/quantum
@@ -4,20 +4,22 @@
 iptables
 iputils
 mysql-server # NOPRIME
+openvswitch # NOPRIME
 python-boto
 python-eventlet
 python-greenlet
 python-iso8601
 python-kombu
 python-netaddr
-python-paste
-python-paste-deploy
+# rhel6 gets these via pip
+python-paste        # dist:f16,f17,f18,f19
+python-paste-deploy # dist:f16,f17,f18,f19
 python-qpid
 python-routes
 python-sqlalchemy
 python-suds
 rabbitmq-server # NOPRIME
-qpid-cpp-server-daemon # NOPRIME
+qpid-cpp-server        # NOPRIME
 sqlite
 sudo
 vconfig
diff --git a/files/rpms/ryu b/files/rpms/ryu
index 4a4fc52..0f62f9f 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,5 +1,5 @@
-python-setuptools
 python-gevent
 python-gflags
 python-netifaces
+python-setuptools #dist:f16,f17,f18,f19
 python-sphinx
diff --git a/files/rpms/swift b/files/rpms/swift
index ce41ceb..ee1fad8 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -8,8 +8,8 @@
 python-greenlet
 python-netifaces
 python-nose
-python-paste-deploy
-python-setuptools
+python-paste-deploy # dist:f16,f17,f18,f19
+python-setuptools   # dist:f16,f17,f18,f19
 python-simplejson
 python-webob
 pyxattr
diff --git a/functions b/functions
index edc4bf9..06d7e7b 100644
--- a/functions
+++ b/functions
@@ -57,15 +57,12 @@
 # die $LINENO "message"
 function die() {
     local exitcode=$?
+    set +o xtrace
+    local line=$1; shift
     if [ $exitcode == 0 ]; then
         exitcode=1
     fi
-    set +o xtrace
-    local msg="[ERROR] $0:$1 $2"
-    echo $msg 1>&2;
-    if [[ -n ${SCREEN_LOGDIR} ]]; then
-        echo $msg >> "${SCREEN_LOGDIR}/error.log"
-    fi
+    err $line "$*"
     exit $exitcode
 }
 
@@ -75,14 +72,49 @@
 # NOTE: env-var is the variable name without a '$'
 # die_if_not_set $LINENO env-var "message"
 function die_if_not_set() {
-    (
-        local exitcode=$?
-        set +o xtrace
-        local evar=$2; shift
-        if ! is_set $evar || [ $exitcode != 0 ]; then
-            die $@
-        fi
-    )
+    local exitcode=$?
+    FXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local line=$1; shift
+    local evar=$1; shift
+    if ! is_set $evar || [ $exitcode != 0 ]; then
+        die $line "$*"
+    fi
+    $FXTRACE
+}
+
+
+# Prints line number and "message" in error format
+# err $LINENO "message"
+function err() {
+    local exitcode=$?
+    errXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local msg="[ERROR] $0:$1 $2"
+    echo $msg 1>&2;
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
+    $errXTRACE
+    return $exitcode
+}
+
+
+# Checks an environment variable is not set or has length 0 OR if the
+# exit code is non-zero and prints "message"
+# NOTE: env-var is the variable name without a '$'
+# err_if_not_set $LINENO env-var "message"
+function err_if_not_set() {
+    local exitcode=$?
+    errinsXTRACE=$(set +o | grep xtrace)
+    set +o xtrace
+    local line=$1; shift
+    local evar=$1; shift
+    if ! is_set $evar || [ $exitcode != 0 ]; then
+        err $line "$*"
+    fi
+    $errinsXTRACE
+    return $exitcode
 }
 
 
@@ -168,6 +200,7 @@
     echo "$pkg_dir"
 }
 
+
 # get_packages() collects a list of package names of any type from the
 # prerequisite files in ``files/{apts|rpms}``.  The list is intended
 # to be passed to a package installer such as apt or yum.
@@ -299,7 +332,7 @@
         os_RELEASE=$(lsb_release -r -s)
         os_UPDATE=""
         os_PACKAGE="rpm"
-        if [[ "Debian,Ubuntu" =~ $os_VENDOR ]]; then
+        if [[ "Debian,Ubuntu,LinuxMint" =~ $os_VENDOR ]]; then
             os_PACKAGE="deb"
         elif [[ "SUSE LINUX" =~ $os_VENDOR ]]; then
             lsb_release -d -s | grep -q openSUSE
@@ -348,53 +381,23 @@
             os_VENDOR=""
         done
         os_PACKAGE="rpm"
+    # If lsb_release is not installed, we should be able to detect Debian OS
+    elif [[ -f /etc/debian_version ]] && [[ $(cat /proc/version) =~ "Debian" ]]; then
+        os_VENDOR="Debian"
+        os_PACKAGE="deb"
+        os_CODENAME=$(awk '/VERSION=/' /etc/os-release | sed 's/VERSION=//' | sed -r 's/\"|\(|\)//g' | awk '{print $2}')
+        os_RELEASE=$(awk '/VERSION_ID=/' /etc/os-release | sed 's/VERSION_ID=//' | sed 's/\"//g')
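+        # Illustrative example (assumed /etc/os-release content): on Debian wheezy,
+        # VERSION="7.0 (wheezy)" parses to os_CODENAME=wheezy and VERSION_ID="7.0" to os_RELEASE=7.0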
     fi
     export os_VENDOR os_RELEASE os_UPDATE os_PACKAGE os_CODENAME
 }
 
-# git update using reference as a branch.
-# git_update_branch ref
-function git_update_branch() {
-
-    GIT_BRANCH=$1
-
-    git checkout -f origin/$GIT_BRANCH
-    # a local branch might not exist
-    git branch -D $GIT_BRANCH || true
-    git checkout -b $GIT_BRANCH
-}
-
-
-# git update using reference as a tag. Be careful editing source at that repo
-# as working copy will be in a detached mode
-# git_update_tag ref
-function git_update_tag() {
-
-    GIT_TAG=$1
-
-    git tag -d $GIT_TAG
-    # fetching given tag only
-    git fetch origin tag $GIT_TAG
-    git checkout -f $GIT_TAG
-}
-
-
-# git update using reference as a branch.
-# git_update_remote_branch ref
-function git_update_remote_branch() {
-
-    GIT_BRANCH=$1
-
-    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
-}
-
 
 # Translate the OS version values into common nomenclature
 # Sets ``DISTRO`` from the ``os_*`` values
 function GetDistro() {
     GetOSVersion
-    if [[ "$os_VENDOR" =~ (Ubuntu) ]]; then
-        # 'Everyone' refers to Ubuntu releases by the code name adjective
+    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
+        # 'Everyone' refers to Ubuntu / Debian releases by the code name adjective
         DISTRO=$os_CODENAME
     elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
         # For Fedora, just use 'f' and the release
@@ -408,6 +411,9 @@
         else
             DISTRO="sle${os_RELEASE}sp${os_UPDATE}"
         fi
+    elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
+        # Drop the . release as we assume it's compatible
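+        # e.g. os_RELEASE "6.4" becomes DISTRO "rhel6" (illustrative example)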
+        DISTRO="rhel${os_RELEASE::1}"
     else
         # Catch-all for now is Vendor + Release + Update
         DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
@@ -416,21 +422,8 @@
 }
 
 
-# Determine if current distribution is an Ubuntu-based distribution.
-# It will also detect non-Ubuntu but Debian-based distros; this is not an issue
-# since Debian and Ubuntu should be compatible.
-# is_ubuntu
-function is_ubuntu {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-
-    [ "$os_PACKAGE" = "deb" ]
-}
-
-
 # Determine if current distribution is a Fedora-based distribution
-# (Fedora, RHEL, CentOS).
+# (Fedora, RHEL, CentOS, etc).
 # is_fedora
 function is_fedora {
     if [[ -z "$os_VENDOR" ]]; then
@@ -453,6 +446,17 @@
 }
 
 
+# Determine if current distribution is an Ubuntu-based distribution
+# It will also detect non-Ubuntu but Debian-based distros
+# is_ubuntu
+function is_ubuntu {
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+    [ "$os_PACKAGE" = "deb" ]
+}
+
+
 # Exit after outputting a message about the distribution not being supported.
 # exit_distro_not_supported [optional-string-telling-what-is-missing]
 function exit_distro_not_supported {
@@ -467,6 +471,13 @@
     fi
 }
 
+# Utility function for checking machine architecture
+# is_arch arch-type
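+# e.g. (illustrative): is_arch "x86_64" && echo "Running on x86_64"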
+function is_arch {
+    ARCH_TYPE=$1
+
+    [ "$(uname -m)" = "$ARCH_TYPE" ]
+}
 
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
 # be owned by the installation user, we create the directory and change the
@@ -527,6 +538,43 @@
 }
 
 
+# git update using reference as a branch.
+# git_update_branch ref
+function git_update_branch() {
+
+    GIT_BRANCH=$1
+
+    git checkout -f origin/$GIT_BRANCH
+    # a local branch might not exist
+    git branch -D $GIT_BRANCH || true
+    git checkout -b $GIT_BRANCH
+}
+
+
+# git update using reference as a branch.
+# git_update_remote_branch ref
+function git_update_remote_branch() {
+
+    GIT_BRANCH=$1
+
+    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
+}
+
+
+# git update using reference as a tag. Be careful editing source at that repo
+# as working copy will be in a detached mode
+# git_update_tag ref
+function git_update_tag() {
+
+    GIT_TAG=$1
+
+    git tag -d $GIT_TAG
+    # fetching given tag only
+    git fetch origin tag $GIT_TAG
+    git checkout -f $GIT_TAG
+}
+
+
 # Comment an option in an INI file
 # inicomment config-file section option
 function inicomment() {
@@ -536,6 +584,7 @@
     sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file"
 }
 
+
 # Uncomment an option in an INI file
 # iniuncomment config-file section option
 function iniuncomment() {
@@ -557,6 +606,7 @@
     echo ${line#*=}
 }
 
+
 # Determinate is the given option present in the INI file
 # ini_has_option config-file section option
 function ini_has_option() {
@@ -568,6 +618,7 @@
     [ -n "$line" ]
 }
 
+
 # Set an option in an INI file
 # iniset config-file section option value
 function iniset() {
@@ -590,6 +641,7 @@
     fi
 }
 
+
 # Get a multiple line option from an INI file
 # iniget_multiline config-file section option
 function iniget_multiline() {
@@ -601,6 +653,7 @@
     echo ${values}
 }
 
+
 # Set a multiple line option in an INI file
 # iniset_multiline config-file section option value1 value2 valu3 ...
 function iniset_multiline() {
@@ -630,6 +683,7 @@
     done
 }
 
+
 # Append a new option in an ini file without replacing the old value
 # iniadd config-file section option value1 value2 value3 ...
 function iniadd() {
@@ -641,6 +695,17 @@
     iniset_multiline $file $section $option $values
 }
 
+# Find out if a process exists by partial name.
+# is_running name
+function is_running() {
+    local name=$1
+    ps auxw | grep -v grep | grep ${name} > /dev/null
+    RC=$?
+    # sometimes I really hate bash reverse binary logic
+    return $RC
+}
+
+
 # is_service_enabled() checks if the service(s) specified as arguments are
 # enabled by the user in ``ENABLED_SERVICES``.
 #
@@ -828,7 +893,7 @@
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
     fi
-    if [[ $TRACK_DEPENDS = True ]] ; then
+    if [[ $TRACK_DEPENDS = True ]]; then
         source $DEST/.venv/bin/activate
         CMD_PIP=$DEST/.venv/bin/pip
         SUDO_PIP="env"
@@ -836,9 +901,18 @@
         SUDO_PIP="sudo"
         CMD_PIP=$(get_pip_command)
     fi
+
+    if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+        # RHEL6 pip by default doesn't have this (was introduced
+        # around 0.8.1 or so)
+        PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
+    else
+        PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-True}
+    fi
     if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
         PIP_MIRROR_OPT="--use-mirrors"
     fi
+
     $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
         HTTP_PROXY=$http_proxy \
         HTTPS_PROXY=$https_proxy \
@@ -946,6 +1020,8 @@
         echo "sessionname $SCREEN_NAME" > $SCREENRC
         # Set a reasonable statusbar
         echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
+        # Some distributions override PROMPT_COMMAND for the screen terminal type - turn that off
+        echo "setenv PROMPT_COMMAND /bin/true" >> $SCREENRC
         echo "screen -t shell bash" >> $SCREENRC
     fi
     # If this service doesn't already exist in the screenrc file
@@ -956,6 +1032,7 @@
     fi
 }
 
+
 # Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME
 # This is used for service_check when all the screen_it are called finished
 # init_service_check
@@ -970,6 +1047,7 @@
     rm -f "$SERVICE_DIR/$SCREEN_NAME"/*.failure
 }
 
+
 # Helper to get the status of each running service
 # service_check
 function service_check() {
@@ -998,12 +1076,13 @@
     fi
 }
 
+
 # ``pip install`` the dependencies of the package before ``setup.py develop``
 # so pip and not distutils processes the dependency chain
 # Uses globals ``TRACK_DEPENDES``, ``*_proxy`
 # setup_develop directory
 function setup_develop() {
-    if [[ $TRACK_DEPENDS = True ]] ; then
+    if [[ $TRACK_DEPENDS = True ]]; then
         SUDO_CMD="env"
     else
         SUDO_CMD="sudo"
@@ -1178,6 +1257,7 @@
     fi
 }
 
+
 # Set the database backend to use
 # When called from stackrc/localrc DATABASE_BACKENDS has not been
 # initialized yet, just save the configuration selection and call back later
@@ -1195,6 +1275,7 @@
     fi
 }
 
+
 # Toggle enable/disable_service for services that must run exclusive of each other
 #  $1 The name of a variable containing a space-separated list of services
 #  $2 The name of a variable in which to store the enabled service's name
@@ -1211,6 +1292,7 @@
     return 0
 }
 
+
 # Wait for an HTTP server to start answering requests
 # wait_for_service timeout url
 function wait_for_service() {
@@ -1219,6 +1301,7 @@
     timeout $timeout sh -c "while ! http_proxy= https_proxy= curl -s $url >/dev/null; do sleep 1; done"
 }
 
+
 # Wrapper for ``yum`` to set proxy environment variables
 # Uses globals ``OFFLINE``, ``*_proxy`
 # yum_install package [package ...]
@@ -1231,8 +1314,21 @@
         yum install -y "$@"
 }
 
+
+# zypper wrapper to set arguments correctly
+# zypper_install package [package ...]
+function zypper_install() {
+    [[ "$OFFLINE" = "True" ]] && return
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
+        zypper --non-interactive install --auto-agree-with-licenses "$@"
+}
+
+
 # ping check
 # Uses globals ``ENABLED_SERVICES``
+# ping_check from-net ip boot-timeout expected
 function ping_check() {
     if is_service_enabled quantum; then
         _ping_check_quantum  "$1" $2 $3 $4
@@ -1269,8 +1365,10 @@
     fi
 }
 
+
 # ssh check
 
+# ssh_check net-name key-file floating-ip default-user active-timeout
 function ssh_check() {
     if is_service_enabled quantum; then
         _ssh_check_quantum  "$1" $2 $3 $4 $5
@@ -1286,23 +1384,12 @@
     local DEFAULT_INSTANCE_USER=$4
     local ACTIVE_TIMEOUT=$5
     local probe_cmd=""
-    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
         die $LINENO "server didn't become ssh-able!"
     fi
 }
 
 
-# zypper wrapper to set arguments correctly
-# zypper_install package [package ...]
-function zypper_install() {
-    [[ "$OFFLINE" = "True" ]] && return
-    local sudo="sudo"
-    [[ "$(id -u)" = "0" ]] && sudo="env"
-    $sudo http_proxy=$http_proxy https_proxy=$https_proxy \
-        zypper --non-interactive install --auto-agree-with-licenses "$@"
-}
-
-
 # Add a user to a group.
 # add_user_to_group user group
 function add_user_to_group() {
@@ -1325,13 +1412,14 @@
 # Get the path to the direcotry where python executables are installed.
 # get_python_exec_prefix
 function get_python_exec_prefix() {
-    if is_fedora; then
+    if is_fedora || is_suse; then
         echo "/usr/bin"
     else
         echo "/usr/local/bin"
     fi
 }
 
+
 # Get the location of the $module-rootwrap executables, where module is cinder
 # or nova.
 # get_rootwrap_location module
@@ -1341,6 +1429,7 @@
     echo "$(get_python_exec_prefix)/$module-rootwrap"
 }
 
+
 # Get the path to the pip command.
 # get_pip_command
 function get_pip_command() {
@@ -1349,8 +1438,97 @@
     else
         which pip
     fi
+
+    if [ $? -ne 0 ]; then
+        die $LINENO "Unable to find pip; cannot continue"
+    fi
 }
 
+
+# Path permissions sanity check
+# check_path_perm_sanity path
+function check_path_perm_sanity() {
+    # Ensure no element of the path has 0700 permissions, which is very
+    # likely to cause issues for daemons.  Inspired by default 0700
+    # homedir permissions on RHEL and common practice of making DEST in
+    # the stack user's homedir.
+
+    local real_path=$(readlink -f $1)
+    local rebuilt_path=""
+    for i in $(echo ${real_path} | tr "/" " "); do
+        rebuilt_path=$rebuilt_path"/"$i
+
+        if [[ $(stat -c '%a' ${rebuilt_path}) = 700 ]]; then
+            echo "*** DEST path element"
+            echo "***    ${rebuilt_path}"
+            echo "*** appears to have 0700 permissions."
+            echo "*** This is very likely to cause fatal issues for devstack daemons."
+
+            if [[ -n "$SKIP_PATH_SANITY" ]]; then
+                return
+            else
+                echo "*** Set SKIP_PATH_SANITY to skip this check"
+                die $LINENO "Invalid path permissions"
+            fi
+        fi
+    done
+}
+
+
+# This function recursively compares versions, and is not meant to be
+# called by anything other than vercmp_numbers below. This function does
+# not work with alphabetic versions.
+#
+# _vercmp_r sep ver1 ver2
+function _vercmp_r {
+  typeset sep
+  typeset -a ver1=() ver2=()
+  sep=$1; shift
+  ver1=("${@:1:sep}")
+  ver2=("${@:sep+1}")
+
+  if ((ver1 > ver2)); then
+    echo 1; return 0
+  elif ((ver2 > ver1)); then
+    echo -1; return 0
+  fi
+
+  if ((sep <= 1)); then
+    echo 0; return 0
+  fi
+
+  _vercmp_r $((sep-1)) "${ver1[@]:1}" "${ver2[@]:1}"
+}
+
+
+# This function compares two versions and is meant to be called by
+# external callers. Please note the function assumes non-alphabetic
+# versions. For example, this will work:
+#
+#   vercmp_numbers 1.10 1.4
+#
+# The above will return "1", as 1.10 is greater than 1.4.
+#
+#   vercmp_numbers 5.2 6.4
+#
+# The above will return "-1", as 5.2 is less than 6.4.
+#
+#   vercmp_numbers 4.0 4.0
+#
+# The above will return "0", as the versions are equal.
+#
+# vercmp_numbers ver1 ver2
+vercmp_numbers() {
+  typeset v1=$1 v2=$2 sep
+  typeset -a ver1 ver2
+
+  IFS=. read -ra ver1 <<< "$v1"
+  IFS=. read -ra ver2 <<< "$v2"
+
+  _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}"
+}
+
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/baremetal b/lib/baremetal
index 24cce9f..bed3c09 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -53,6 +53,7 @@
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+
 # Sub-driver settings
 # -------------------
 
@@ -203,7 +204,7 @@
     sudo mkdir -p /tftpboot
     sudo mkdir -p /tftpboot/pxelinux.cfg
     sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
-    sudo chown -R $STACK_USER:libvirtd /tftpboot
+    sudo chown -R $STACK_USER:$LIBVIRT_GROUP /tftpboot
 
     # ensure $NOVA_STATE_PATH/baremetal is prepared
     sudo mkdir -p $NOVA_STATE_PATH/baremetal
@@ -427,7 +428,7 @@
        "$mac_1" \
        | grep ' id ' | get_field 2 )
     [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node"
-    id2=$(nova baremetal-add-interface "$id" "$mac_2" )
+    id2=$(nova baremetal-interface-add "$id" "$mac_2" )
     [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id"
 }
 
diff --git a/lib/ceilometer b/lib/ceilometer
index 58cafd1..bd4ab0f 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -1,8 +1,8 @@
 # lib/ceilometer
 # Install and start **Ceilometer** service
 
-# To enable, add the following to localrc
-# ENABLED_SERVICES+=ceilometer-acompute,ceilometer-acentral,ceilometer-collector,ceilometer-api
+# To enable Ceilometer services, add the following to localrc
+# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
 
 # Dependencies:
 # - functions
@@ -37,12 +37,16 @@
 CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer}
 
 # Support potential entry-points console scripts
-if [ -d $CEILOMETER_DIR/bin ] ; then
+if [[ -d $CEILOMETER_DIR/bin ]]; then
     CEILOMETER_BIN_DIR=$CEILOMETER_DIR/bin
 else
     CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
 fi
 
+
+# Functions
+# ---------
+
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_ceilometer() {
@@ -66,9 +70,8 @@
 
     iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
 
-    iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications'
+    iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
-    iniset $CEILOMETER_CONF DEFAULT `database_connection_url nova`
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
@@ -88,13 +91,15 @@
     iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
 
+    iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer
+
     configure_mongodb
 
     cleanup_ceilometer
 }
 
 function configure_mongodb() {
-    if is_fedora ; then
+    if is_fedora; then
         # ensure smallfiles selected to minimize freespace requirements
         sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 
@@ -122,10 +127,10 @@
 
 # start_ceilometer() - Start running processes, including screen
 function start_ceilometer() {
-    screen_it ceilometer-acompute "cd $CEILOMETER_DIR && sg libvirtd \"$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
-    screen_it ceilometer-acentral "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-collector "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-api "cd $CEILOMETER_DIR && $CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-acompute "sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
+    screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
 }
 
 # stop_ceilometer() - Stop running processes
@@ -136,6 +141,7 @@
     done
 }
 
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/cinder b/lib/cinder
index deace68..40a25ba 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -58,24 +58,54 @@
 # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755
 CINDER_SECURE_DELETE=`trueorfalse True $CINDER_SECURE_DELETE`
 
+# Cinder reports allocations back to the scheduler at periodic intervals.
+# It turns out we can get an "out of space" issue when we run tests too
+# quickly just because cinder didn't realize we'd freed up resources.
+# Make this configurable so that devstack-gate/tempest can set it to
+# less than the 60-second default.
+# https://bugs.launchpad.net/cinder/+bug/1180976
+CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
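+# For example, localrc might set CINDER_PERIODIC_INTERVAL=10 for faster test runs (illustrative value).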
+
 # Name of the lvm volume groups to use/create for iscsi volumes
-# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+VOLUME_BACKING_DEVICE=${VOLUME_BACKING_DEVICE:-}
+
+# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
 VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2}
+VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file}
+VOLUME_BACKING_DEVICE2=${VOLUME_BACKING_DEVICE2:-}
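+# To use an existing block device instead of a loopback file, set these to the
+# device path, e.g. VOLUME_BACKING_DEVICE=/dev/sdb (illustrative device name).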
+
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 
-# _clean_volume_group removes all cinder volumes from the specified volume group
-# _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
-function _clean_volume_group() {
+
+# Functions
+# ---------
+# _cleanup_lvm removes all cinder volumes and the backing file of the
+# volume group used by cinder
+# _cleanup_lvm $VOLUME_GROUP $VOLUME_NAME_PREFIX
+function _cleanup_lvm() {
     local vg=$1
-    local vg_prefix=$2
+    local lv_prefix=$2
+
     # Clean out existing volumes
     for lv in `sudo lvs --noheadings -o lv_name $vg`; do
-        # vg_prefix prefixes the LVs we want
-        if [[ "${lv#$vg_prefix}" != "$lv" ]]; then
+        # lv_prefix prefixes the LVs we want
+        if [[ "${lv#$lv_prefix}" != "$lv" ]]; then
             sudo lvremove -f $vg/$lv
         fi
     done
+
+    # if there is no logical volume left, it's safe to attempt a cleanup
+    # of the backing file
+    if [ -z "`sudo lvs --noheadings -o lv_name $vg`" ]; then
+        # if the backing physical device is a loop device, it was probably set up by devstack
+        VG_DEV=$(sudo losetup -j $DATA_DIR/${vg}-backing-file | awk -F':' '/backing-file/ { print $1}')
+        if [[ -n "$VG_DEV" ]]; then
+            sudo losetup -d $VG_DEV
+            rm -f $DATA_DIR/${vg}-backing-file
+        fi
+    fi
 }
 
 # cleanup_cinder() - Remove residual data files, anything left over from previous
@@ -115,9 +145,10 @@
     fi
 
     # Campsite rule: leave behind a volume group at least as clean as we found it
-    _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+    _cleanup_lvm $VOLUME_GROUP $VOLUME_NAME_PREFIX
+
     if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
-        _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX
+        _cleanup_lvm $VOLUME_GROUP2 $VOLUME_NAME_PREFIX
     fi
 }
 
@@ -182,7 +213,7 @@
         iniset $CINDER_CONF lvmdriver-1 volume_backend_name LVM_iSCSI
         iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2
         iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
-        iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI
+        iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI_2
     else
         iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
         iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
@@ -193,6 +224,7 @@
     iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
     iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
+    iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
 
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
@@ -230,7 +262,33 @@
         )
     elif [ "$CINDER_DRIVER" == "sheepdog" ]; then
         iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
+    elif [ "$CINDER_DRIVER" == "glusterfs" ]; then
+        # To use glusterfs, set the following in localrc:
+        # CINDER_DRIVER=glusterfs
+        # CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2"
+        # Shares are <host>:<volume> and separated by semicolons.
+
+        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.glusterfs.GlusterfsDriver"
+        iniset $CINDER_CONF DEFAULT glusterfs_shares_config "$CINDER_CONF_DIR/glusterfs_shares"
+        touch $CINDER_CONF_DIR/glusterfs_shares
+        if [ ! -z "$CINDER_GLUSTERFS_SHARES" ]; then
+            CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n")
+            echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares
+        fi
     fi
+
+    if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+        # Cinder clones are slightly larger due to some extra
+        # metadata.  RHEL6 will not allow auto-extending of LV's
+        # without this, leading to clones giving hard-to-track disk
+        # I/O errors.
+        # see https://bugzilla.redhat.com/show_bug.cgi?id=975052
+        sudo sed -i~ \
+            -e 's/snapshot_autoextend_threshold =.*/snapshot_autoextend_threshold = 80/' \
+            -e 's/snapshot_autoextend_percent =.*/snapshot_autoextend_percent = 20/' \
+            /etc/lvm/lvm.conf
+    fi
+
 }
 
 # create_cinder_accounts() - Set up common required cinder accounts
@@ -282,42 +340,45 @@
 }
 
 create_cinder_volume_group() {
-    # According to the CINDER_MULTI_LVM_BACKEND value, configure one or two default volumes
+    # According to the ``CINDER_MULTI_LVM_BACKEND`` value, configure one or two default volumes
     # group called ``stack-volumes`` (and ``stack-volumes2``) for the volume
     # service if it (they) does (do) not yet exist. If you don't wish to use a
     # file backed volume group, create your own volume group called ``stack-volumes``
     # and ``stack-volumes2`` before invoking ``stack.sh``.
     #
-    # By default, the two backing files are 5G in size, and are stored in
-    # ``/opt/stack/data``.
+    # The two backing files are ``VOLUME_BACKING_FILE_SIZE`` in size, and they are stored in
+    # the ``DATA_DIR``.
 
     if ! sudo vgs $VOLUME_GROUP; then
-        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+        if [ -z "$VOLUME_BACKING_DEVICE" ]; then
+            # Only create if the file doesn't already exist
+            [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+            DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
 
-        # Only create if the file doesn't already exists
-        [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
-
-        DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
-
-        # Only create if the loopback device doesn't contain $VOLUME_GROUP
-        if ! sudo vgs $VOLUME_GROUP; then
-            sudo vgcreate $VOLUME_GROUP $DEV
+            # Only create if the loopback device doesn't contain $VOLUME_GROUP
+            if ! sudo vgs $VOLUME_GROUP; then
+                sudo vgcreate $VOLUME_GROUP $DEV
+            fi
+        else
+            sudo vgcreate $VOLUME_GROUP $VOLUME_BACKING_DEVICE
         fi
     fi
     if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
         #set up the second volume if CINDER_MULTI_LVM_BACKEND is enabled
 
         if ! sudo vgs $VOLUME_GROUP2; then
-            VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file}
+            if [ -z "$VOLUME_BACKING_DEVICE2" ]; then
+                # Only create if the file doesn't already exist
+                [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2
 
-            # Only create if the file doesn't already exists
-            [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2
+                DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2`
 
-            DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2`
-
-            # Only create if the loopback device doesn't contain $VOLUME_GROUP
-            if ! sudo vgs $VOLUME_GROUP2; then
-                sudo vgcreate $VOLUME_GROUP2 $DEV
+                # Only create if the loopback device doesn't contain $VOLUME_GROUP
+                if ! sudo vgs $VOLUME_GROUP2; then
+                    sudo vgcreate $VOLUME_GROUP2 $DEV
+                fi
+            else
+                sudo vgcreate $VOLUME_GROUP2 $VOLUME_BACKING_DEVICE2
             fi
         fi
     fi
@@ -373,21 +434,20 @@
     setup_develop $CINDERCLIENT_DIR
 }
 
-# apply config.d approach (e.g. Oneiric does not have this)
+# Configure tgt to include the cinder volumes directory via /etc/tgt/stack.d
 function _configure_tgt_for_config_d() {
-    if [[ ! -d /etc/tgt/conf.d/ ]]; then
-        sudo mkdir -p /etc/tgt/conf.d
-        echo "include /etc/tgt/conf.d/*.conf" | sudo tee -a /etc/tgt/targets.conf
+    if [[ ! -d /etc/tgt/stack.d/ ]]; then
+        sudo ln -sf $CINDER_STATE_PATH/volumes /etc/tgt/stack.d
+        echo "include /etc/tgt/stack.d/*" | sudo tee -a /etc/tgt/targets.conf
     fi
 }
 
 # start_cinder() - Start running processes, including screen
 function start_cinder() {
     if is_service_enabled c-vol; then
+        # Delete any old stack.conf
+        sudo rm -f /etc/tgt/conf.d/stack.conf
         _configure_tgt_for_config_d
-        if [[ ! -f /etc/tgt/conf.d/stack.conf ]]; then
-            echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/stack.conf
-        fi
         if is_ubuntu; then
             # tgt in oneiric doesn't restart properly if tgtd isn't running
             # do it in two steps
@@ -432,6 +492,7 @@
     fi
 }
 
+
 # Restore xtrace
 $XTRACE
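
For reference, a minimal ``localrc`` sketch that exercises the backing options
added above; the device path and share addresses are illustrative only:

    # Use a pre-existing block device instead of a loopback file for the
    # default volume group (assumes an otherwise unused /dev/sdb)
    VOLUME_BACKING_DEVICE=/dev/sdb

    # Alternatively, select the GlusterFS driver; shares are <host>:<volume>
    # pairs separated by semicolons
    CINDER_DRIVER=glusterfs
    CINDER_GLUSTERFS_SHARES="127.0.0.1:/vol1;127.0.0.1:/vol2"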
 
diff --git a/lib/database b/lib/database
index cbe886f..442ed56 100644
--- a/lib/database
+++ b/lib/database
@@ -20,21 +20,25 @@
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+
 # Register a database backend
 #  $1 The name of the database backend
+# This is required to be defined before the specific database scripts are sourced
 function register_database {
     [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1"
 }
 
 # Sourcing the database libs sets DATABASE_BACKENDS with the available list
-for f in $TOP_DIR/lib/databases/*; do source $f; done
+for f in $TOP_DIR/lib/databases/*; do
+    source $f;
+done
 
 # ``DATABASE_BACKENDS`` now contains a list of the supported databases
 # Look in ``ENABLED_SERVICES`` to see if one has been selected
 for db in $DATABASE_BACKENDS; do
     # Set the type for the rest of the backend to use
     if is_service_enabled $db; then
-        # Set this now for the rest of the database funtions
+        # Set this now for the rest of the database functions
         DATABASE_TYPE=$db
     fi
 done
@@ -42,6 +46,9 @@
 # This is not an error as multi-node installs will do this on the compute nodes
 
 
+# Functions
+# ---------
+
 # Get rid of everything enough to cleanly change database backends
 function cleanup_database {
     cleanup_database_$DATABASE_TYPE
@@ -103,15 +110,14 @@
     configure_database_$DATABASE_TYPE
 }
 
-# Generate an SQLAlchemy connection URL and store it in a variable
-#  $1 The variable name in which to store the connection URL
-#  $2 The name of the database
+# Generate an SQLAlchemy connection URL and output it using echo
+#  $1 The name of the database
 function database_connection_url {
-    local var=$1
-    local db=$2
-    database_connection_url_$DATABASE_TYPE $var $db
+    local db=$1
+    database_connection_url_$DATABASE_TYPE $db
 }
 
+
 # Restore xtrace
 $XTRACE
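
The ``database_connection_url`` change above switches the helper from writing
into a named variable to echoing the URL, so callers now capture the output;
a sketch of the old versus new calling convention (the variable name is
illustrative):

    # old convention: pass the target variable name and the database name
    #database_connection_url NOVA_CONN nova

    # new convention: capture the echoed URL and use it directly
    NOVA_CONN=$(database_connection_url nova)
    iniset $NOVA_CONF DEFAULT sql_connection $NOVA_CONN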
 
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 30450b1..211d797 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -8,8 +8,13 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+
 register_database mysql
 
+
+# Functions
+# ---------
+
 # Get rid of everything enough to cleanly change database backends
 function cleanup_database_mysql {
     if is_ubuntu; then
@@ -31,8 +36,8 @@
 function recreate_database_mysql {
     local db=$1
     local charset=$2
-    mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "DROP DATABASE IF EXISTS $db;"
-    mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -e "CREATE DATABASE $db CHARACTER SET $charset;"
+    mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "DROP DATABASE IF EXISTS $db;"
+    mysql -u$DATABASE_USER -p$DATABASE_PASSWORD -h$MYSQL_HOST -e "CREATE DATABASE $db CHARACTER SET $charset;"
 }
 
 function configure_database_mysql {
@@ -137,6 +142,7 @@
     echo "$BASE_SQL_CONN/$db?charset=utf8"
 }
 
+
 # Restore xtrace
 $MY_XTRACE
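
Passing ``-h$MYSQL_HOST`` explicitly lets ``recreate_database_mysql`` target a
database server that is not local; a hedged ``localrc`` sketch (address and
credentials are illustrative):

    # Point the MySQL client calls at a remote database server
    MYSQL_HOST=192.168.1.10
    DATABASE_USER=root
    DATABASE_PASSWORD=secret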
 
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index b64de2c..b173772 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -8,8 +8,13 @@
 PG_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+
 register_database postgresql
 
+
+# Functions
+# ---------
+
 # Get rid of everything enough to cleanly change database backends
 function cleanup_database_postgresql {
     stop_service postgresql
@@ -88,6 +93,7 @@
     echo "$BASE_SQL_CONN/$db?client_encoding=utf8"
 }
 
+
 # Restore xtrace
 $PG_XTRACE
 
diff --git a/lib/glance b/lib/glance
index 3376400..583f879 100644
--- a/lib/glance
+++ b/lib/glance
@@ -51,8 +51,8 @@
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
 
 
-# Entry Points
-# ------------
+# Functions
+# ---------
 
 # cleanup_glance() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
@@ -199,6 +199,7 @@
     screen -S $SCREEN_NAME -p g-reg -X kill
 }
 
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/heat b/lib/heat
index 88535c3..13bf130 100644
--- a/lib/heat
+++ b/lib/heat
@@ -2,7 +2,7 @@
 # Install and start **Heat** service
 
 # To enable, add the following to localrc
-# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng
+# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
 
 # Dependencies:
 # - functions
@@ -25,15 +25,20 @@
 
 # Defaults
 # --------
+
+# set up default directories
 HEAT_DIR=$DEST/heat
 HEATCLIENT_DIR=$DEST/python-heatclient
-# set up default directories
+HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
+
+
+# Functions
+# ---------
 
 # cleanup_heat() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_heat() {
-    # This function intentionally left blank
-    :
+    sudo rm -rf $HEAT_AUTH_CACHE_DIR
 }
 
 # configure_heatclient() - Set config files, create data dirs, etc
@@ -59,6 +64,11 @@
     HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003}
     HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST}
     HEAT_API_PORT=${HEAT_API_PORT:-8004}
+    HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini
+    HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json
+
+    cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE
+    cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE
 
     # Cloudformation API
     HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf
@@ -68,21 +78,19 @@
     iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG
     iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST
     iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT
+    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CFN_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $HEAT_API_CFN_CONF keystone_authtoken admin_user heat
+    iniset $HEAT_API_CFN_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn
+    iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
 
     iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT
 
-    HEAT_API_CFN_PASTE_INI=$HEAT_CONF_DIR/heat-api-cfn-paste.ini
-    cp $HEAT_DIR/etc/heat/heat-api-cfn-paste.ini $HEAT_API_CFN_PASTE_INI
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_user heat
-    iniset $HEAT_API_CFN_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CFN_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
-
     # OpenStack API
     HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf
     cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF
@@ -91,20 +99,19 @@
     iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST
     iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT
+    iniset $HEAT_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $HEAT_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $HEAT_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $HEAT_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $HEAT_API_CONF keystone_authtoken admin_user heat
+    iniset $HEAT_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api
+    iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
 
     iniset_rpc_backend heat $HEAT_API_CONF DEFAULT
 
-    HEAT_API_PASTE_INI=$HEAT_CONF_DIR/heat-api-paste.ini
-    cp $HEAT_DIR/etc/heat/heat-api-paste.ini $HEAT_API_PASTE_INI
-    iniset $HEAT_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $HEAT_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $HEAT_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $HEAT_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_API_PASTE_INI filter:authtoken admin_user heat
-    iniset $HEAT_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_API_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
 
     # engine
     HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf
@@ -130,20 +137,19 @@
     iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG
     iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST
     iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT
+    iniset $HEAT_API_CW_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $HEAT_API_CW_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $HEAT_API_CW_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $HEAT_API_CW_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CW_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $HEAT_API_CW_CONF keystone_authtoken admin_user heat
+    iniset $HEAT_API_CW_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch
+    iniset $HEAT_API_CW_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
 
     iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT
 
-    HEAT_API_CW_PASTE_INI=$HEAT_CONF_DIR/heat-api-cloudwatch-paste.ini
-    cp $HEAT_DIR/etc/heat/heat-api-cloudwatch-paste.ini $HEAT_API_CW_PASTE_INI
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_user heat
-    iniset $HEAT_API_CW_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CW_PASTE_INI filter:ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
 }
 
 # init_heat() - Initialize database
@@ -152,8 +158,19 @@
     # (re)create heat database
     recreate_database heat utf8
 
-    $HEAT_DIR/bin/heat-db-setup $os_PACKAGE -r $DATABASE_PASSWORD
-    $HEAT_DIR/tools/nova_create_flavors.sh
+    $HEAT_DIR/bin/heat-manage db_sync
+    create_heat_cache_dir
+}
+
+# create_heat_cache_dir() - Part of the init_heat() process
+function create_heat_cache_dir() {
+    # Create cache dirs
+    sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api
+    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api
+    sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cfn
+    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cfn
+    sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cloudwatch
+    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cloudwatch
 }
 
 # install_heatclient() - Collect source and prepare
@@ -176,12 +193,13 @@
 
 # stop_heat() - Stop running processes
 function stop_heat() {
-    # Kill the cinder screen windows
-    for serv in h-eng h-api-cfn h-api-cw; do
+    # Kill the screen windows
+    for serv in h-eng h-api h-api-cfn h-api-cw; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
 }
 
+
 # Restore xtrace
 $XTRACE
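
To pick up the new native ``h-api`` service alongside the existing heat
services, the ``localrc`` line from the header comment above is sufficient:

    # Enable all heat services, including the native OpenStack API
    ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng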
 
diff --git a/lib/horizon b/lib/horizon
index b63e1f8..0cc250e 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -38,6 +38,22 @@
 APACHE_USER=${APACHE_USER:-$USER}
 APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
 
+# Set up service name and configuration path
+if is_ubuntu; then
+    APACHE_NAME=apache2
+    APACHE_CONF=sites-available/horizon
+elif is_fedora; then
+    APACHE_NAME=httpd
+    APACHE_CONF=conf.d/horizon.conf
+elif is_suse; then
+    APACHE_NAME=apache2
+    APACHE_CONF=vhosts.d/horizon.conf
+fi
+
+
+# Functions
+# ---------
+
 # utility method of setting python option
 function _horizon_config_set() {
     local file=$1
@@ -57,16 +73,21 @@
     fi
 }
 
+
+
 # Entry Points
 # ------------
 
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_horizon() {
-    # kill instances (nova)
-    # delete image files (glance)
-    # This function intentionally left blank
-    :
+    if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+        # If ``/usr/bin/node`` points into ``$DEST``,
+        # we installed it via ``install_nodejs``
+        if [[ $(readlink -f /usr/bin/node) =~ ($DEST) ]]; then
+            sudo rm /usr/bin/node
+        fi
+    fi
 }
 
 # configure_horizon() - Set config files, create data dirs, etc
@@ -97,10 +118,8 @@
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
-
+    HORIZON_REQUIRE=''
     if is_ubuntu; then
-        APACHE_NAME=apache2
-        APACHE_CONF=sites-available/horizon
         # Clean up the old config name
         sudo rm -f /etc/apache2/sites-enabled/000-default
         # Be a good citizen and use the distro tools here
@@ -109,18 +128,23 @@
         # WSGI isn't enabled by default, enable it
         sudo a2enmod wsgi
     elif is_fedora; then
-        APACHE_NAME=httpd
-        APACHE_CONF=conf.d/horizon.conf
+        if [[ "$os_RELEASE" -ge "18" ]]; then
+            # fedora 18 has "Require all denied" in its httpd.conf
+            # and requires an explicit "Require all granted"
+            HORIZON_REQUIRE='Require all granted'
+        fi
         sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
     elif is_suse; then
-        APACHE_NAME=apache2
-        APACHE_CONF=vhosts.d/horizon.conf
         # WSGI isn't enabled by default, enable it
         sudo a2enmod wsgi
     else
         exit_distro_not_supported "apache configuration"
     fi
 
+    # Remove old log files that could mess with how devstack detects whether Horizon
+    # has been successfully started (see start_horizon() and functions::screen_it())
+    sudo rm -f /var/log/$APACHE_NAME/horizon_*
+
     # Configure apache to run horizon
     sudo sh -c "sed -e \"
         s,%USER%,$APACHE_USER,g;
@@ -128,8 +152,8 @@
         s,%HORIZON_DIR%,$HORIZON_DIR,g;
         s,%APACHE_NAME%,$APACHE_NAME,g;
         s,%DEST%,$DEST,g;
+        s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g;
     \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF"
-
 }
 
 # install_horizon() - Collect source and prepare
@@ -152,6 +176,8 @@
         if [[ ! -e "/usr/bin/node" ]]; then
             install_package nodejs-legacy
         fi
+    elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then
+        install_package nodejs
     fi
 
     git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
@@ -165,17 +191,14 @@
 
 # stop_horizon() - Stop running processes (non-screen)
 function stop_horizon() {
-    if is_ubuntu; then
-        stop_service apache2
-    elif is_fedora; then
-        stop_service httpd
-    elif is_suse; then
-        stop_service apache2
+    if [ -n "$APACHE_NAME" ]; then
+        stop_service $APACHE_NAME
     else
         exit_distro_not_supported "apache configuration"
     fi
 }
 
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/keystone b/lib/keystone
index 0fbc7d7..2edd137 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -63,8 +63,8 @@
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
 
-# Entry Points
-# ------------
+# Functions
+# ---------
 
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
@@ -178,7 +178,6 @@
     cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
-
 }
 
 # create_keystone_accounts() - Sets up common required keystone accounts
@@ -254,25 +253,6 @@
             --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0" \
             --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0"
     fi
-
-    # TODO(dtroyer): This is part of a series of changes...remove these when
-    #                complete if they are really unused
-#    KEYSTONEADMIN_ROLE=$(keystone role-create \
-#        --name KeystoneAdmin \
-#        | grep " id " | get_field 2)
-#    KEYSTONESERVICE_ROLE=$(keystone role-create \
-#        --name KeystoneServiceAdmin \
-#        | grep " id " | get_field 2)
-
-    # TODO(termie): these two might be dubious
-#    keystone user-role-add \
-#        --user_id $ADMIN_USER \
-#        --role_id $KEYSTONEADMIN_ROLE \
-#        --tenant_id $ADMIN_TENANT
-#    keystone user-role-add \
-#        --user_id $ADMIN_USER \
-#        --role_id $KEYSTONESERVICE_ROLE \
-#        --tenant_id $ADMIN_TENANT
 }
 
 # init_keystone() - Initialize databases, etc.
@@ -339,6 +319,7 @@
     screen -S $SCREEN_NAME -p key -X kill
 }
 
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/ldap b/lib/ldap
index 53f6837..89b31b2 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -1,13 +1,17 @@
 # lib/ldap
 # Functions to control the installation and configuration of **ldap**
 
-# ``stack.sh`` calls the entry points in this order:
-#
+# ``lib/keystone`` calls the entry points in this order:
+# install_ldap()
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+
+# Functions
+# ---------
+
 # install_ldap
 # install_ldap() - Collect source and prepare
 function install_ldap() {
@@ -44,7 +48,7 @@
     fi
 
     # add our top level ldap nodes
-    if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success" ; then
+    if ldapsearch -x -w $LDAP_PASSWORD -H ldap://localhost -D dc=Manager,dc=openstack,dc=org -x -b dc=openstack,dc=org | grep -q "Success"; then
         printf "LDAP already configured for OpenStack\n"
         if [[ "$KEYSTONE_CLEAR_LDAP" == "yes" ]]; then
             # clear LDAP state
diff --git a/lib/nova b/lib/nova
index 8d045b5..afc540e 100644
--- a/lib/nova
+++ b/lib/nova
@@ -37,6 +37,9 @@
 
 NOVA_CONF_DIR=/etc/nova
 NOVA_CONF=$NOVA_CONF_DIR/nova.conf
+NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
+NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
+
 NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini}
 
 # Public facing bits
@@ -74,10 +77,13 @@
 
 # Set defaults according to the virt driver
 if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-    PUBLIC_INTERFACE_DEFAULT=eth3
+    PUBLIC_INTERFACE_DEFAULT=eth2
     GUEST_INTERFACE_DEFAULT=eth1
     # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-    FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
+    FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
+    if is_service_enabled quantum; then
+        XEN_INTEGRATION_BRIDGE=$(sed -e 's/.* xen_integration_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
+    fi
 elif [ "$VIRT_DRIVER" = 'baremetal' ]; then
     NETWORK_MANAGER=${NETWORK_MANAGER:-FlatManager}
     PUBLIC_INTERFACE_DEFAULT=eth0
@@ -122,12 +128,8 @@
 TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
 
 
-# Entry Points
-# ------------
-
-function add_nova_opt {
-    echo "$1" >>$NOVA_CONF
-}
+# Functions
+# ---------
 
 # Helper to clean iptables rules
 function clean_iptables() {
@@ -152,7 +154,7 @@
         instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
         if [ ! "$instances" = "" ]; then
             echo $instances | xargs -n1 sudo virsh destroy || true
-            echo $instances | xargs -n1 sudo virsh undefine || true
+            echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
         fi
 
         # Logout and delete iscsi sessions
@@ -235,37 +237,39 @@
         # Force IP forwarding on, just on case
         sudo sysctl -w net.ipv4.ip_forward=1
 
-        # Attempt to load modules: network block device - used to manage qcow images
-        sudo modprobe nbd || true
+        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+            # Attempt to load modules: network block device - used to manage qcow images
+            sudo modprobe nbd || true
 
-        # Check for kvm (hardware based virtualization).  If unable to initialize
-        # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
-        # come with hardware virtualization disabled in BIOS.
-        if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
-            sudo modprobe kvm || true
-            if [ ! -e /dev/kvm ]; then
-                echo "WARNING: Switching to QEMU"
-                LIBVIRT_TYPE=qemu
-                if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then
-                    # https://bugzilla.redhat.com/show_bug.cgi?id=753589
-                    sudo setsebool virt_use_execmem on
+            # Check for kvm (hardware based virtualization).  If unable to initialize
+            # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
+            # come with hardware virtualization disabled in BIOS.
+            if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
+                sudo modprobe kvm || true
+                if [ ! -e /dev/kvm ]; then
+                    echo "WARNING: Switching to QEMU"
+                    LIBVIRT_TYPE=qemu
+                    if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then
+                        # https://bugzilla.redhat.com/show_bug.cgi?id=753589
+                        sudo setsebool virt_use_execmem on
+                    fi
                 fi
             fi
-        fi
 
-        # Install and configure **LXC** if specified.  LXC is another approach to
-        # splitting a system into many smaller parts.  LXC uses cgroups and chroot
-        # to simulate multiple systems.
-        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
-            if is_ubuntu; then
-                if [[ ! "$DISTRO" > natty ]]; then
-                    cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
-                    sudo mkdir -p /cgroup
-                    if ! grep -q cgroup /etc/fstab; then
-                        echo "$cgline" | sudo tee -a /etc/fstab
-                    fi
-                    if ! mount -n | grep -q cgroup; then
-                        sudo mount /cgroup
+            # Install and configure **LXC** if specified.  LXC is another approach to
+            # splitting a system into many smaller parts.  LXC uses cgroups and chroot
+            # to simulate multiple systems.
+            if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
+                if is_ubuntu; then
+                    if [[ ! "$DISTRO" > natty ]]; then
+                        cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
+                        sudo mkdir -p /cgroup
+                        if ! grep -q cgroup /etc/fstab; then
+                            echo "$cgline" | sudo tee -a /etc/fstab
+                        fi
+                        if ! mount -n | grep -q cgroup; then
+                            sudo mount /cgroup
+                        fi
                     fi
                 fi
             fi
@@ -276,9 +280,10 @@
             configure_baremetal_nova_dirs
         fi
 
-        if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF ; then
-            # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
-            cat <<EOF | sudo tee -a $QEMU_CONF
+        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+            if is_service_enabled quantum && is_quantum_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
+                # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
+                cat <<EOF | sudo tee -a $QEMU_CONF
 cgroup_device_acl = [
     "/dev/null", "/dev/full", "/dev/zero",
     "/dev/random", "/dev/urandom",
@@ -286,45 +291,29 @@
     "/dev/rtc", "/dev/hpet","/dev/net/tun",
 ]
 EOF
-        fi
+            fi
 
-        if is_ubuntu; then
-            LIBVIRT_DAEMON=libvirt-bin
-        else
-            LIBVIRT_DAEMON=libvirtd
-        fi
-
-
-
-        if is_fedora; then
-            # Starting with fedora 18 enable stack-user to virsh -c qemu:///system
-            # by creating a policy-kit rule for stack-user
-            if [[ "$os_RELEASE" -ge "18" ]]; then
-                rules_dir=/etc/polkit-1/rules.d
-                sudo mkdir -p $rules_dir
-                sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
-polkit.addRule(function(action, subject) {
-     if (action.id == 'org.libvirt.unix.manage' &&
-         subject.user == '"$STACK_USER"') {
-         return polkit.Result.YES;
-     }
-});
-EOF"
-                unset rules_dir
+            if is_ubuntu; then
+                LIBVIRT_DAEMON=libvirt-bin
             else
-                sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+                LIBVIRT_DAEMON=libvirtd
+            fi
+
+            if is_fedora || is_suse; then
+                if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
+                    sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
 [libvirt Management Access]
-Identity=unix-group:libvirtd
+Identity=unix-group:$LIBVIRT_GROUP
 Action=org.libvirt.unix.manage
 ResultAny=yes
 ResultInactive=yes
 ResultActive=yes
-EOF'
-            fi
-        elif is_suse; then
-            # Work around the fact that polkit-default-privs overrules pklas
-            # with 'unix-group:$group'.
-            sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+EOF"
+                elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
+                    # openSUSE < 12.3 or SLE
+                    # Work around the fact that polkit-default-privs overrules pklas
+                    # with 'unix-group:$group'.
+                    sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
 [libvirt Management Access]
 Identity=unix-user:$USER
 Action=org.libvirt.unix.manage
@@ -332,21 +321,37 @@
 ResultInactive=yes
 ResultActive=yes
 EOF"
+                else
+                    # Starting with fedora 18 and opensuse-12.3 enable stack-user to
+                    # virsh -c qemu:///system by creating a policy-kit rule for
+                    # stack-user using the new Javascript syntax
+                    rules_dir=/etc/polkit-1/rules.d
+                    sudo mkdir -p $rules_dir
+                    sudo bash -c "cat <<EOF > $rules_dir/50-libvirt-$STACK_USER.rules
+polkit.addRule(function(action, subject) {
+     if (action.id == 'org.libvirt.unix.manage' &&
+         subject.user == '"$STACK_USER"') {
+         return polkit.Result.YES;
+     }
+});
+EOF"
+                    unset rules_dir
+                fi
+            fi
+
+            # The user that nova runs as needs to be member of **libvirtd** group otherwise
+            # nova-compute will be unable to use libvirt.
+            if ! getent group $LIBVIRT_GROUP >/dev/null; then
+                sudo groupadd $LIBVIRT_GROUP
+            fi
+            add_user_to_group $STACK_USER $LIBVIRT_GROUP
+
+            # libvirt detects various settings on startup, as we potentially changed
+            # the system configuration (modules, filesystems), we need to restart
+            # libvirt to detect those changes.
+            restart_service $LIBVIRT_DAEMON
         fi
 
-        # The user that nova runs as needs to be member of **libvirtd** group otherwise
-        # nova-compute will be unable to use libvirt.
-        if ! getent group libvirtd >/dev/null; then
-            sudo groupadd libvirtd
-        fi
-        add_user_to_group $STACK_USER libvirtd
-
-        # libvirt detects various settings on startup, as we potentially changed
-        # the system configuration (modules, filesystems), we need to restart
-        # libvirt to detect those changes.
-        restart_service $LIBVIRT_DAEMON
-
-
         # Instance Storage
         # ----------------
 
@@ -413,7 +418,6 @@
 
     # (Re)create ``nova.conf``
     rm -f $NOVA_CONF
-    add_nova_opt "[DEFAULT]"
     iniset $NOVA_CONF DEFAULT verbose "True"
     iniset $NOVA_CONF DEFAULT debug "True"
     iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
@@ -433,9 +437,18 @@
     if is_baremetal; then
         iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm`
     fi
-    iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
-    iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
+    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+        iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
+        iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
+    fi
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
+    iniset $NOVA_CONF osapi_v3 enabled "True"
+
+    if is_fedora; then
+        # nova defaults to /usr/local/bin, but fedora pip likes to
+        # install things in /usr/bin
+        iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
+    fi
 
     if is_service_enabled n-api; then
         iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
@@ -480,7 +493,6 @@
         iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier"
     fi
 
-
     # Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
     if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
         EXTRA_OPTS=$EXTRA_FLAGS
@@ -509,7 +521,7 @@
         VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1}
     fi
 
-    if is_service_enabled n-novnc || is_service_enabled n-xvnc ; then
+    if is_service_enabled n-novnc || is_service_enabled n-xvnc; then
       # Address on which instance vncservers will listen on compute hosts.
       # For multi-host, this should be the management ip of the compute host.
       VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1}
@@ -537,6 +549,32 @@
     iniset $NOVA_CONF DEFAULT glance_api_servers "$GLANCE_HOSTPORT"
 }
 
+function init_nova_cells() {
+    if is_service_enabled n-cell; then
+        cp $NOVA_CONF $NOVA_CELLS_CONF
+        iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB`
+        iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell
+        iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
+        iniset $NOVA_CELLS_CONF cells enable True
+        iniset $NOVA_CELLS_CONF cells name child
+
+        iniset $NOVA_CONF DEFAULT scheduler_topic cells
+        iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI
+        iniset $NOVA_CONF cells enable True
+        iniset $NOVA_CONF cells name region
+
+        if is_service_enabled n-api-meta; then
+            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
+            iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
+            iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
+        fi
+
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1
+        $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=guest --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1
+    fi
+}
+
 # create_nova_cache_dir() - Part of the init_nova() process
 function create_nova_cache_dir() {
     # Create cache dir
@@ -576,6 +614,10 @@
         # Migrate nova database
         $NOVA_BIN_DIR/nova-manage db sync
 
+        if is_service_enabled n-cell; then
+            recreate_database $NOVA_CELLS_DB latin1
+        fi
+
         # (Re)create nova baremetal database
         if is_baremetal; then
             recreate_database nova_bm latin1
@@ -596,26 +638,32 @@
 # install_nova() - Collect source and prepare
 function install_nova() {
     if is_service_enabled n-cpu; then
-        if is_ubuntu; then
-            install_package libvirt-bin
-        elif is_fedora || is_suse; then
-            install_package libvirt
-        else
-            exit_distro_not_supported "libvirt installation"
-        fi
-
-        # Install and configure **LXC** if specified.  LXC is another approach to
-        # splitting a system into many smaller parts.  LXC uses cgroups and chroot
-        # to simulate multiple systems.
-        if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
+        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
             if is_ubuntu; then
-                if [[ "$DISTRO" > natty ]]; then
-                    install_package cgroup-lite
-                fi
+                install_package kvm
+                install_package libvirt-bin
+                install_package python-libvirt
+            elif is_fedora || is_suse; then
+                install_package kvm
+                install_package libvirt
+                install_package libvirt-python
             else
-                ### FIXME(dtroyer): figure this out
-                echo "RPM-based cgroup not implemented yet"
-                yum_install libcgroup-tools
+                exit_distro_not_supported "libvirt installation"
+            fi
+
+            # Install and configure **LXC** if specified.  LXC is another approach to
+            # splitting a system into many smaller parts.  LXC uses cgroups and chroot
+            # to simulate multiple systems.
+            if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
+                if is_ubuntu; then
+                    if [[ "$DISTRO" > natty ]]; then
+                        install_package cgroup-lite
+                    fi
+                else
+                    ### FIXME(dtroyer): figure this out
+                    echo "RPM-based cgroup not implemented yet"
+                    yum_install libcgroup-tools
+                fi
             fi
         fi
     fi
@@ -646,14 +694,30 @@
 
 # start_nova() - Start running processes, including screen
 function start_nova() {
-    # The group **libvirtd** is added to the current user in this script.
-    # Use 'sg' to execute nova-compute as a member of the **libvirtd** group.
+    NOVA_CONF_BOTTOM=$NOVA_CONF
+
     # ``screen_it`` checks ``is_service_enabled``, it is not needed here
     screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
-    screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
+
+    if is_service_enabled n-cell; then
+        NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF
+        screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
+        screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
+        screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
+    fi
+
+    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+        # The group **$LIBVIRT_GROUP** is added to the current user in this script.
+        # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
+        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'"
+    else
+        screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
+    fi
     screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
-    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
-    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
+    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM"
+    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM"
+    screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM"
+
     screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR"
     screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
     screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
@@ -668,11 +732,14 @@
 # stop_nova() - Stop running processes (non-screen)
 function stop_nova() {
     # Kill the nova screen windows
-    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-cond n-spice; do
+    # Some services are listed here twice since more than one instance
+    # of a service may be running in certain configs.
+    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
 }
 
+
 # Restore xtrace
 $XTRACE
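
A sketch of enabling the new cells support from ``localrc``; the service names
match the ``is_service_enabled`` checks above, everything else stays at
DevStack defaults:

    # Run a parent (region) and child cell on this host
    ENABLED_SERVICES+=,n-cell

    # Optionally move the metadata API into the child cell
    ENABLED_SERVICES+=,n-api-meta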
 
diff --git a/lib/quantum b/lib/quantum
index 68c0539..afe99c4 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -52,7 +52,10 @@
 # Quantum.
 #
 # With Quantum networking the NETWORK_MANAGER variable is ignored.
-
+#
+# To enable specific configuration options for either the Open vSwitch or
+# LinuxBridge plugin, please see the top level README file under the
+# Quantum section.
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -62,6 +65,12 @@
 # Quantum Network Configuration
 # -----------------------------
 
+# Gateway and subnet defaults, in case they are not customized in localrc
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
+PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.225}
+PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
+PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
+
 # Set up default directories
 QUANTUM_DIR=$DEST/quantum
 QUANTUMCLIENT_DIR=$DEST/python-quantumclient
@@ -76,22 +85,27 @@
 # Default Quantum Port
 Q_PORT=${Q_PORT:-9696}
 # Default Quantum Host
-Q_HOST=${Q_HOST:-$HOST_IP}
+Q_HOST=${Q_HOST:-$SERVICE_HOST}
 # Default admin username
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-quantum}
 # Default auth strategy
 Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
 # Use namespace or not
 Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
+# RHEL's support for namespaces requires using veths with ovs
+Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
 Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
 # Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
+Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
 # Use quantum-debug command
 Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
 # The name of the default q-l3 router
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
+# List of config file names in addition to the main plugin config file
+# See _configure_quantum_common() for details about setting it up
+declare -a Q_PLUGIN_EXTRA_CONF_FILES
 
 if is_service_enabled quantum; then
     Q_RR_CONF_FILE=$QUANTUM_CONF_DIR/rootwrap.conf
@@ -108,18 +122,18 @@
     # The following variables control the Quantum openvswitch and
     # linuxbridge plugins' allocation of tenant networks and
     # availability of provider networks. If these are not configured
-    # in localrc, tenant networks will be local to the host (with no
+    # in ``localrc``, tenant networks will be local to the host (with no
     # remote connectivity), and no physical resources will be
     # available for the allocation of provider networks.
 
     # To use GRE tunnels for tenant networks, set to True in
-    # localrc. GRE tunnels are only supported by the openvswitch
+    # ``localrc``. GRE tunnels are only supported by the openvswitch
     # plugin, and currently only on Ubuntu.
     ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-False}
 
     # If using GRE tunnels for tenant networks, specify the range of
     # tunnel IDs from which tenant networks are allocated. Can be
-    # overriden in localrc in necesssary.
+    # overridden in ``localrc`` if necessary.
     TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGE:-1:1000}
 
     # To use VLANs for tenant networks, set to True in localrc. VLANs
@@ -127,7 +141,7 @@
     # requiring additional configuration described below.
     ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
 
-    # If using VLANs for tenant networks, set in localrc to specify
+    # If using VLANs for tenant networks, set in ``localrc`` to specify
     # the range of VLAN VIDs from which tenant networks are
     # allocated. An external network switch must be configured to
     # trunk these VLANs between hosts for multi-host connectivity.
@@ -136,16 +150,16 @@
     TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
 
     # If using VLANs for tenant networks, or if using flat or VLAN
-    # provider networks, set in localrc to the name of the physical
-    # network, and also configure OVS_PHYSICAL_BRIDGE for the
-    # openvswitch agent or LB_PHYSICAL_INTERFACE for the linuxbridge
+    # provider networks, set in ``localrc`` to the name of the physical
+    # network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
+    # openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
     # agent, as described below.
     #
     # Example: ``PHYSICAL_NETWORK=default``
     PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
 
     # With the openvswitch plugin, if using VLANs for tenant networks,
-    # or if using flat or VLAN provider networks, set in localrc to
+    # or if using flat or VLAN provider networks, set in ``localrc`` to
     # the name of the OVS bridge to use for the physical network. The
     # bridge will be created if it does not already exist, but a
     # physical interface must be manually added to the bridge as a
@@ -155,14 +169,14 @@
     OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
 
     # With the linuxbridge plugin, if using VLANs for tenant networks,
-    # or if using flat or VLAN provider networks, set in localrc to
+    # or if using flat or VLAN provider networks, set in ``localrc`` to
     # the name of the network interface to use for the physical
     # network.
     #
     # Example: ``LB_PHYSICAL_INTERFACE=eth1``
     LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
 
-    # With the openvswitch plugin, set to True in localrc to enable
+    # With the openvswitch plugin, set to True in ``localrc`` to enable
     # provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
     #
     # Example: ``OVS_ENABLE_TUNNELING=True``
@@ -171,13 +185,15 @@
 
 # Quantum plugin specific functions
 # ---------------------------------
-# Please refer to lib/quantum_plugins/README.md for details.
+
+# Please refer to ``lib/quantum_plugins/README.md`` for details.
 source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN
 
 # Agent loadbalancer service plugin functions
 # -------------------------------------------
+
 # Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/quantum_plugins/services/agent_loadbalancer
+source $TOP_DIR/lib/quantum_plugins/services/loadbalancer
 
 # Use security group or not
 if has_quantum_plugin_security_group; then
@@ -186,8 +202,8 @@
     Q_USE_SECGROUP=False
 fi
 
-# Entry Points
-# ------------
+# Functions
+# ---------
 
 # configure_quantum()
 # Set common config for all quantum server and agents.
@@ -294,11 +310,11 @@
             sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
         done
         NET_ID=$(quantum net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
-        SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+        SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
         sudo ifconfig $OVS_PHYSICAL_BRIDGE up
     else
         NET_ID=$(quantum net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
-        SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+        SUBNET_ID=$(quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
     fi
 
     if [[ "$Q_L3_ENABLED" == "True" ]]; then
@@ -313,7 +329,7 @@
         quantum router-interface-add $ROUTER_ID $SUBNET_ID
         # Create an external network, and a subnet. Configure the external network as router gw
         EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
-        EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+        EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} --gateway $PUBLIC_NETWORK_GATEWAY --name $PUBLIC_SUBNET_NAME $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
         quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
 
         if is_service_enabled q-l3; then
@@ -352,14 +368,26 @@
 
 # install_quantum_agent_packages() - Collect source and prepare
 function install_quantum_agent_packages() {
-    # install packages that is specific to plugin agent
-    quantum_plugin_install_agent_packages
+    # install packages that are specific to plugin agent(s)
+    if is_service_enabled q-agt q-dhcp q-l3; then
+        quantum_plugin_install_agent_packages
+    fi
+
+    if is_service_enabled q-lbaas; then
+        quantum_agent_lbaas_install_agent_packages
+    fi
 }
 
 # Start running processes, including screen
 function start_quantum_service_and_check() {
+    # build config-file options
+    local cfg_file
+    local CFG_FILE_OPTIONS="--config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
+        CFG_FILE_OPTIONS+=" --config-file /$cfg_file"
+    done
     # Start the Quantum service
-    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $CFG_FILE_OPTIONS"
     echo "Waiting for Quantum to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
       die $LINENO "Quantum did not start"
@@ -374,6 +402,11 @@
     screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
     screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
 
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        # For XenServer, start an agent for the domU openvswitch
+        screen_it q-domua "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU"
+    fi
+
     if is_service_enabled q-lbaas; then
         screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
     fi
@@ -385,17 +418,28 @@
         pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
         [ ! -z "$pid" ] && sudo kill -9 $pid
     fi
+    if is_service_enabled q-meta; then
+        pid=$(ps aux | awk '/quantum-ns-metadata-proxy/ { print $2 }')
+        [ ! -z "$pid" ] && sudo kill -9 $pid
+    fi
 }
 
 # cleanup_quantum() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_quantum() {
-    :
+    if is_quantum_ovs_base_plugin; then
+        quantum_ovs_base_cleanup
+    fi
+
+    # delete all namespaces created by quantum
+    for ns in $(sudo ip netns list | grep -o -e qdhcp-[0-9a-f\-]* -e qrouter-[0-9a-f\-]*); do
+        sudo ip netns delete ${ns}
+    done
 }
 
 # _configure_quantum_common()
 # Set common config for all quantum server and agents.
-# This MUST be called before other _configure_quantum_* functions.
+# This MUST be called before other ``_configure_quantum_*`` functions.
 function _configure_quantum_common() {
     # Put config files in ``QUANTUM_CONF_DIR`` for everyone to find
     if [[ ! -d $QUANTUM_CONF_DIR ]]; then
@@ -405,8 +449,11 @@
 
     cp $QUANTUM_DIR/etc/quantum.conf $QUANTUM_CONF
 
-    # set plugin-specific variables
-    # Q_PLUGIN_CONF_PATH, Q_PLUGIN_CONF_FILENAME, Q_DB_NAME, Q_PLUGIN_CLASS
+    # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
+    # For the main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
+    # For additional plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``,
+    # ``Q_PLUGIN_EXTRA_CONF_FILES``.  For example:
+    #    ``Q_PLUGIN_EXTRA_CONF_FILES=(file1 file2)``
     quantum_plugin_configure_common
 
     if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
@@ -418,9 +465,25 @@
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
 
-    iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME`
+    iniset /$Q_PLUGIN_CONF_FILE database connection `database_connection_url $Q_DB_NAME`
     iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
 
+    # If additional config files are set, make sure their path name is set as well
+    if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then
+        die $LINENO "Quantum additional plugin config not set.. exiting"
+    fi
+
+    # If additional config files exist, copy them over to quantum configuration
+    # directory
+    if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then
+        mkdir -p /$Q_PLUGIN_EXTRA_CONF_PATH
+        local f
+        for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do
+            Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
+            cp $QUANTUM_DIR/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} /${Q_PLUGIN_EXTRA_CONF_FILES[$f]}
+        done
+    fi
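+    # For illustration (assumed example values): with Q_PLUGIN_EXTRA_CONF_PATH=etc/quantum/plugins/cisco
+    # and Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini), the loop above rewrites the entry to
+    # etc/quantum/plugins/cisco/cisco_plugins.ini and copies it to
+    # /etc/quantum/plugins/cisco/cisco_plugins.ini for the server and agents to read.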
+
     _quantum_setup_rootwrap
 }
 
@@ -437,7 +500,7 @@
     iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
     # Intermediate fix until Quantum patch lands and then line above will
     # be cleaned.
-    iniset $QUANTUM_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND"
+    iniset $QUANTUM_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND"
 
     _quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url
     _quantum_setup_interface_driver $QUANTUM_TEST_CONFIG_FILE
@@ -467,7 +530,6 @@
     # for l3-agent, only use per tenant router if we have namespaces
     Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
     AGENT_L3_BINARY="$QUANTUM_DIR/bin/quantum-l3-agent"
-    PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
     Q_L3_CONF_FILE=$QUANTUM_CONF_DIR/l3_agent.ini
 
     cp $QUANTUM_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
@@ -497,9 +559,7 @@
     _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
 }
 
-function _configure_quantum_lbaas()
-{
-    quantum_agent_lbaas_install_agent_packages
+function _configure_quantum_lbaas() {
     quantum_agent_lbaas_configure_common
     quantum_agent_lbaas_configure_agent
 }
@@ -509,7 +569,9 @@
 function _configure_quantum_plugin_agent() {
     # Specify the default root helper prior to agent configuration to
     # ensure that an agent's configuration can override the default
-    iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
+    iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
+    iniset $QUANTUM_CONF DEFAULT verbose True
+    iniset $QUANTUM_CONF DEFAULT debug True
 
     # Configure agent for plugin
     quantum_plugin_configure_plugin_agent
@@ -544,10 +606,6 @@
 
     iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
     _quantum_setup_keystone $QUANTUM_CONF keystone_authtoken
-    # Comment out keystone authtoken configuration in api-paste.ini
-    # It is required to avoid any breakage in Quantum where the sample
-    # api-paste.ini has authtoken configurations.
-    _quantum_commentout_keystone_authtoken $Q_API_PASTE_FILE filter:authtoken
 
     # Configure plugin
     quantum_plugin_configure_service
@@ -562,17 +620,17 @@
         return
     fi
     # Deploy new rootwrap filters files (owned by root).
-    # Wipe any existing rootwrap.d files first
+    # Wipe any existing ``rootwrap.d`` files first
     Q_CONF_ROOTWRAP_D=$QUANTUM_CONF_DIR/rootwrap.d
     if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
         sudo rm -rf $Q_CONF_ROOTWRAP_D
     fi
-    # Deploy filters to $QUANTUM_CONF_DIR/rootwrap.d
+    # Deploy filters to ``$QUANTUM_CONF_DIR/rootwrap.d``
     mkdir -p -m 755 $Q_CONF_ROOTWRAP_D
     cp -pr $QUANTUM_DIR/etc/quantum/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
     sudo chown -R root:root $Q_CONF_ROOTWRAP_D
     sudo chmod 644 $Q_CONF_ROOTWRAP_D/*
-    # Set up rootwrap.conf, pointing to $QUANTUM_CONF_DIR/rootwrap.d
+    # Set up ``rootwrap.conf``, pointing to ``$QUANTUM_CONF_DIR/rootwrap.d``
     # location moved in newer versions, prefer new location
     if test -r $QUANTUM_DIR/etc/quantum/rootwrap.conf; then
       sudo cp -p $QUANTUM_DIR/etc/quantum/rootwrap.conf $Q_RR_CONF_FILE
@@ -582,7 +640,7 @@
     sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
     sudo chown root:root $Q_RR_CONF_FILE
     sudo chmod 0644 $Q_RR_CONF_FILE
-    # Specify rootwrap.conf as first parameter to quantum-rootwrap
+    # Specify ``rootwrap.conf`` as first parameter to quantum-rootwrap
     ROOTWRAP_SUDOER_CMD="$QUANTUM_ROOTWRAP $Q_RR_CONF_FILE *"
 
     # Set up the rootwrap sudoers for quantum
@@ -593,7 +651,7 @@
     sudo mv $TEMPFILE /etc/sudoers.d/quantum-rootwrap
 
     # Update the root_helper
-    iniset $QUANTUM_CONF AGENT root_helper "$Q_RR_COMMAND"
+    iniset $QUANTUM_CONF agent root_helper "$Q_RR_COMMAND"
 }
 
 # Configures keystone integration for quantum service and agents
@@ -618,22 +676,12 @@
     rm -f $QUANTUM_AUTH_CACHE_DIR/*
 }
 
-function _quantum_commentout_keystone_authtoken() {
-    local conf_file=$1
-    local section=$2
-
-    inicomment $conf_file $section auth_host
-    inicomment $conf_file $section auth_port
-    inicomment $conf_file $section auth_protocol
-    inicomment $conf_file $section auth_url
-
-    inicomment $conf_file $section admin_tenant_name
-    inicomment $conf_file $section admin_user
-    inicomment $conf_file $section admin_password
-    inicomment $conf_file $section signing_dir
-}
-
 function _quantum_setup_interface_driver() {
+
+    # ovs_use_veth needs to be set before the plugin configuration
+    # occurs to allow plugins to override the setting.
+    iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
     quantum_plugin_setup_interface_driver $1
 }
 
@@ -702,14 +750,15 @@
     local timeout_sec=$5
     local probe_cmd = ""
     probe_cmd=`_get_probe_cmd_prefix $from_net`
-    if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then
+    if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then
         die $LINENO "server didn't become ssh-able!"
     fi
 }
 
 # Quantum 3rd party programs
 #---------------------------
-# please refer to lib/quantum_thirdparty/README.md for details
+
+# Please refer to ``lib/quantum_thirdparty/README.md`` for details
 QUANTUM_THIRD_PARTIES=""
 for f in $TOP_DIR/lib/quantum_thirdparty/*; do
      third_party=$(basename $f)
diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md
index 05bfb85..e829940 100644
--- a/lib/quantum_plugins/README.md
+++ b/lib/quantum_plugins/README.md
@@ -34,3 +34,5 @@
 * ``quantum_plugin_setup_interface_driver``
 * ``has_quantum_plugin_security_group``:
   return 0 if the plugin support quantum security group otherwise return 1
+* ``quantum_plugin_check_adv_test_requirements``:
+  return 0 if requirements are satisfied, otherwise return 1
diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight
index 4857f49..cae8882 100644
--- a/lib/quantum_plugins/bigswitch_floodlight
+++ b/lib/quantum_plugins/bigswitch_floodlight
@@ -42,19 +42,33 @@
 }
 
 function quantum_plugin_configure_service() {
-    iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servers $BS_FL_CONTROLLERS_PORT
-    iniset /$Q_PLUGIN_CONF_FILE RESTPROXY servertimeout $BS_FL_CONTROLLER_TIMEOUT
+    iniset /$Q_PLUGIN_CONF_FILE restproxy servers $BS_FL_CONTROLLERS_PORT
+    iniset /$Q_PLUGIN_CONF_FILE restproxy servertimeout $BS_FL_CONTROLLER_TIMEOUT
+    if [ "$BS_FL_VIF_DRIVER" = "ivs" ]
+    then
+        iniset /$Q_PLUGIN_CONF_FILE nova vif_type ivs
+    fi
 }
 
 function quantum_plugin_setup_interface_driver() {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+    if [ "$BS_FL_VIF_DRIVER" = "ivs" ]
+    then
+        iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.IVSInterfaceDriver
+    else
+        iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+    fi
 }
 
+
 function has_quantum_plugin_security_group() {
     # 1 means False here
     return 1
 }
 
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade
index 6e26ad7..52ce3db 100644
--- a/lib/quantum_plugins/brocade
+++ b/lib/quantum_plugins/brocade
@@ -25,15 +25,16 @@
 }
 
 function quantum_plugin_configure_debug_command() {
-    :
+    iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge
 }
 
 function quantum_plugin_configure_dhcp_agent() {
-    :
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
 function quantum_plugin_configure_l3_agent() {
-    :
+    iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function quantum_plugin_configure_plugin_agent() {
@@ -50,5 +51,9 @@
     return 0
 }
 
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
 # Restore xtrace
 $BRCD_XTRACE
diff --git a/lib/quantum_plugins/cisco b/lib/quantum_plugins/cisco
new file mode 100644
index 0000000..92b91e4
--- /dev/null
+++ b/lib/quantum_plugins/cisco
@@ -0,0 +1,327 @@
+# Quantum Cisco plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Specify the VSM IP address
+Q_CISCO_PLUGIN_VSM_IP=${Q_CISCO_PLUGIN_VSM_IP:-}
+# Specify the VSM username
+Q_CISCO_PLUGIN_VSM_USERNAME=${Q_CISCO_PLUGIN_VSM_USERNAME:-admin}
+# Specify the VSM password for the above username
+Q_CISCO_PLUGIN_VSM_PASSWORD=${Q_CISCO_PLUGIN_VSM_PASSWORD:-}
+# Specify the uVEM integration bridge name
+Q_CISCO_PLUGIN_INTEGRATION_BRIDGE=${Q_CISCO_PLUGIN_INTEGRATION_BRIDGE:-br-int}
+# Specify if tunneling is enabled
+Q_CISCO_PLUGIN_ENABLE_TUNNELING=${Q_CISCO_PLUGIN_ENABLE_TUNNELING:-True}
+# Specify the VXLAN range
+Q_CISCO_PLUGIN_VXLAN_ID_RANGES=${Q_CISCO_PLUGIN_VXLAN_ID_RANGES:-5000:10000}
+# Specify the VLAN range
+Q_CISCO_PLUGIN_VLAN_RANGES=${Q_CISCO_PLUGIN_VLAN_RANGES:-vlan:1:4094}
+
+# Specify ncclient package information
+NCCLIENT_DIR=$DEST/ncclient
+NCCLIENT_VERSION=${NCCLIENT_VERSION:-0.3.1}
+NCCLIENT_REPO=${NCCLIENT_REPO:-${GIT_BASE}/CiscoSystems/ncclient.git}
+NCCLIENT_BRANCH=${NCCLIENT_BRANCH:-master}
+
+# This routine puts a prefix on an existing function name
+function _prefix_function() {
+    declare -F $1 > /dev/null || die $LINENO "$1 doesn't exist"
+    eval "$(echo "${2}_${1}()"; declare -f ${1} | tail -n +2)"
+}
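+# For example (sketch of the effect only):
+#   _prefix_function quantum_plugin_configure_common ovs
+# makes the original body callable as ovs_quantum_plugin_configure_common(),
+# so this file can redefine quantum_plugin_configure_common() below and still
+# delegate to the openvswitch version where needed.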
+
+function _has_ovs_subplugin() {
+    local subplugin
+    for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
+        if [[ "$subplugin" == "openvswitch" ]]; then
+            return 0
+        fi
+    done
+    return 1
+}
+
+function _has_nexus_subplugin() {
+    local subplugin
+    for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
+        if [[ "$subplugin" == "nexus" ]]; then
+            return 0
+        fi
+    done
+    return 1
+}
+
+function _has_n1kv_subplugin() {
+    local subplugin
+    for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
+        if [[ "$subplugin" == "n1kv" ]]; then
+            return 0
+        fi
+    done
+    return 1
+}
+
+# This routine populates the cisco config file with the information for
+# a particular nexus switch
+function _config_switch() {
+    local cisco_cfg_file=$1
+    local switch_ip=$2
+    local username=$3
+    local password=$4
+    local ssh_port=$5
+    shift 5
+
+    local section="NEXUS_SWITCH:$switch_ip"
+    iniset $cisco_cfg_file $section username $username
+    iniset $cisco_cfg_file $section password $password
+    iniset $cisco_cfg_file $section ssh_port $ssh_port
+
+    while [[ ${#@} != 0 ]]; do
+        iniset  $cisco_cfg_file $section $1 $2
+        shift 2
+    done
+}
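+# For example (illustration, "compute1" is a hypothetical host name):
+#   _config_switch $cisco_cfg_file 1.1.1.1 stack stack 22 compute1 1/10
+# writes a [NEXUS_SWITCH:1.1.1.1] section containing username, password and
+# ssh_port plus a "compute1 = 1/10" entry for the trailing host/port pair.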
+
+# Prefix openvswitch plugin routines with "ovs" in order to differentiate from
+# cisco plugin routines. This way the ovs plugin routines can coexist with the
+# cisco plugin routines in this script.
+source $TOP_DIR/lib/quantum_plugins/openvswitch
+_prefix_function quantum_plugin_create_nova_conf ovs
+_prefix_function quantum_plugin_install_agent_packages ovs
+_prefix_function quantum_plugin_configure_common ovs
+_prefix_function quantum_plugin_configure_debug_command ovs
+_prefix_function quantum_plugin_configure_dhcp_agent ovs
+_prefix_function quantum_plugin_configure_l3_agent ovs
+_prefix_function quantum_plugin_configure_plugin_agent ovs
+_prefix_function quantum_plugin_configure_service ovs
+_prefix_function quantum_plugin_setup_interface_driver ovs
+_prefix_function has_quantum_plugin_security_group ovs
+
+# Check the version of the installed ncclient package
+function check_ncclient_version() {
+python << EOF
+version = '$NCCLIENT_VERSION'
+import sys
+try:
+    import pkg_resources
+    import ncclient
+    module_version = pkg_resources.get_distribution('ncclient').version
+    if version != module_version:
+        sys.exit(1)
+except:
+    sys.exit(1)
+EOF
+}
+
+# Install the ncclient package
+function install_ncclient() {
+    git_clone $NCCLIENT_REPO $NCCLIENT_DIR $NCCLIENT_BRANCH
+    (cd $NCCLIENT_DIR; sudo python setup.py install)
+}
+
+# Check if the required version of ncclient has been installed
+function is_ncclient_installed() {
+    # Check if the Cisco ncclient repository exists
+    if [[ -d $NCCLIENT_DIR ]]; then
+        remotes=$(cd $NCCLIENT_DIR; git remote -v | grep fetch | awk '{ print $2}')
+        for remote in $remotes; do
+            if [[ $remote == $NCCLIENT_REPO ]]; then
+                break;
+            fi
+        done
+        if [[ $remote != $NCCLIENT_REPO ]]; then
+            return 1
+        fi
+    else
+        return 1
+    fi
+
+    # Check if the ncclient is installed with the right version
+    if ! check_ncclient_version; then
+        return 1
+    fi
+    return 0
+}
+
+function has_quantum_plugin_security_group() {
+    if _has_ovs_subplugin; then
+        ovs_has_quantum_plugin_security_group
+    else
+        return 1
+    fi
+}
+
+function is_quantum_ovs_base_plugin() {
+    # Cisco uses OVS if openvswitch subplugin is deployed
+    _has_ovs_subplugin
+    return
+}
+
+# populate required nova configuration parameters
+function quantum_plugin_create_nova_conf() {
+    if _has_ovs_subplugin; then
+        ovs_quantum_plugin_create_nova_conf
+    else
+        _quantum_ovs_base_configure_nova_vif_driver
+    fi
+}
+
+function quantum_plugin_install_agent_packages() {
+    # Cisco plugin uses openvswitch to operate in one of its configurations
+    ovs_quantum_plugin_install_agent_packages
+}
+
+# Configure common parameters
+function quantum_plugin_configure_common() {
+    # setup default subplugins
+    if [ ! -v Q_CISCO_PLUGIN_SUBPLUGINS ]; then
+        declare -ga Q_CISCO_PLUGIN_SUBPLUGINS
+        Q_CISCO_PLUGIN_SUBPLUGINS=(openvswitch nexus)
+    fi
+    if _has_ovs_subplugin; then
+        ovs_quantum_plugin_configure_common
+        Q_PLUGIN_EXTRA_CONF_PATH=etc/quantum/plugins/cisco
+        Q_PLUGIN_EXTRA_CONF_FILES=(cisco_plugins.ini)
+    else
+        Q_PLUGIN_CONF_PATH=etc/quantum/plugins/cisco
+        Q_PLUGIN_CONF_FILENAME=cisco_plugins.ini
+    fi
+    Q_PLUGIN_CLASS="quantum.plugins.cisco.network_plugin.PluginV2"
+    Q_DB_NAME=cisco_quantum
+}
+
+function quantum_plugin_configure_debug_command() {
+    if _has_ovs_subplugin; then
+        ovs_quantum_plugin_configure_debug_command
+    fi
+}
+
+function quantum_plugin_configure_dhcp_agent() {
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function quantum_plugin_configure_l3_agent() {
+    if _has_ovs_subplugin; then
+        ovs_quantum_plugin_configure_l3_agent
+    fi
+}
+
+function _configure_nexus_subplugin() {
+    local cisco_cfg_file=$1
+
+    # Install a known compatible ncclient from the Cisco repository if necessary
+    if ! is_ncclient_installed; then
+        # Preserve the two global variables
+        local offline=$OFFLINE
+        local reclone=$RECLONE
+        # Change their values to allow installation
+        OFFLINE=False
+        RECLONE=yes
+        install_ncclient
+        # Restore their values
+        OFFLINE=$offline
+        RECLONE=$reclone
+    fi
+
+    # Setup default nexus switch information
+    if [ ! -v Q_CISCO_PLUGIN_SWITCH_INFO ]; then
+        declare -A Q_CISCO_PLUGIN_SWITCH_INFO
+        HOST_NAME=$(hostname)
+        Q_CISCO_PLUGIN_SWITCH_INFO=([1.1.1.1]=stack:stack:22:${HOST_NAME}:1/10)
+    else
+        iniset $cisco_cfg_file CISCO nexus_driver quantum.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver
+    fi
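+    # The expected value format for each entry appears to be
+    #   [<switch_ip>]=<user>:<password>:<ssh_port>:<host>:<port>[:<host>:<port>...]
+    # e.g. (hypothetical): Q_CISCO_PLUGIN_SWITCH_INFO=([10.0.0.2]=admin:secret:22:compute1:1/10)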
+
+    # Setup the switch configurations
+    local nswitch
+    local sw_info
+    local segment
+    local sw_info_array
+    declare -i count=0
+    for nswitch in ${!Q_CISCO_PLUGIN_SWITCH_INFO[@]}; do
+        sw_info=${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}
+        sw_info_array=${sw_info//:/ }
+        sw_info_array=( $sw_info_array )
+        count=${#sw_info_array[@]}
+        if [[ $count -lt 5 || $(( ($count-3) % 2 )) -ne 0 ]]; then
+            die $LINENO "Incorrect switch configuration: ${Q_CISCO_PLUGIN_SWITCH_INFO[$nswitch]}"
+        fi
+        _config_switch $cisco_cfg_file $nswitch ${sw_info_array[@]}
+    done
+}
+
+# Configure n1kv plugin
+function _configure_n1kv_subplugin() {
+    local cisco_cfg_file=$1
+
+    # populate the cisco plugin cfg file with the VSM information
+    echo "Configuring n1kv in $cisco_cfg_file-- $Q_CISCO_PLUGIN_VSM_IP $Q_CISCO_PLUGIN_VSM_USERNAME $Q_CISCO_PLUGIN_VSM_PASSWORD"
+    iniset $cisco_cfg_file N1KV:$Q_CISCO_PLUGIN_VSM_IP username $Q_CISCO_PLUGIN_VSM_USERNAME
+    iniset $cisco_cfg_file N1KV:$Q_CISCO_PLUGIN_VSM_IP password $Q_CISCO_PLUGIN_VSM_PASSWORD
+
+    iniset $cisco_cfg_file CISCO_N1K integration_bridge $Q_CISCO_PLUGIN_INTEGRATION_BRIDGE
+    iniset $cisco_cfg_file CISCO_N1K enable_tunneling $Q_CISCO_PLUGIN_ENABLE_TUNNELING
+    iniset $cisco_cfg_file CISCO_N1K vxlan_id_ranges $Q_CISCO_PLUGIN_VXLAN_ID_RANGES
+    iniset $cisco_cfg_file CISCO_N1K network_vlan_ranges $Q_CISCO_PLUGIN_VLAN_RANGES
+
+    # Setup the integration bridge by calling the ovs_base
+    OVS_BRIDGE=$Q_CISCO_PLUGIN_INTEGRATION_BRIDGE
+    _quantum_ovs_base_setup_bridge $OVS_BRIDGE
+}
+
+function quantum_plugin_configure_plugin_agent() {
+    if _has_ovs_subplugin; then
+        ovs_quantum_plugin_configure_plugin_agent
+    fi
+}
+
+function quantum_plugin_configure_service() {
+    local subplugin
+    local cisco_cfg_file
+
+    if _has_ovs_subplugin; then
+        ovs_quantum_plugin_configure_service
+        cisco_cfg_file=/${Q_PLUGIN_EXTRA_CONF_FILES[0]}
+    else
+        cisco_cfg_file=/$Q_PLUGIN_CONF_FILE
+    fi
+
+    # Setup the [CISCO_PLUGINS] section
+    if [[ ${#Q_CISCO_PLUGIN_SUBPLUGINS[@]} > 2 ]]; then
+        die $LINENO "At most two subplugins are supported."
+    fi
+
+    if _has_ovs_subplugin && _has_n1kv_subplugin; then
+        die $LINENO "OVS subplugin and n1kv subplugin cannot coexist"
+    fi
+
+    # Setup the subplugins
+    inicomment $cisco_cfg_file CISCO_PLUGINS nexus_plugin
+    inicomment $cisco_cfg_file CISCO_PLUGINS vswitch_plugin
+    inicomment $cisco_cfg_file CISCO_TEST host
+    for subplugin in ${Q_CISCO_PLUGIN_SUBPLUGINS[@]}; do
+        case $subplugin in
+            nexus) iniset $cisco_cfg_file CISCO_PLUGINS nexus_plugin quantum.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin;;
+            openvswitch) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2;;
+            n1kv) iniset $cisco_cfg_file CISCO_PLUGINS vswitch_plugin quantum.plugins.cisco.n1kv.n1kv_quantum_plugin.N1kvQuantumPluginV2;;
+            *) die $LINENO "Unsupported cisco subplugin: $subplugin";;
+        esac
+    done
+
+    if _has_nexus_subplugin; then
+        _configure_nexus_subplugin $cisco_cfg_file
+    fi
+
+    if _has_n1kv_subplugin; then
+        _configure_n1kv_subplugin $cisco_cfg_file
+    fi
+}
+
+function quantum_plugin_setup_interface_driver() {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 324e255..989b930 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -5,19 +5,6 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-function is_quantum_ovs_base_plugin() {
-    # linuxbridge doesn't use OVS
-    return 1
-}
-
-function quantum_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
-}
-
-function quantum_plugin_install_agent_packages() {
-    install_package bridge-utils
-}
-
 function quantum_plugin_configure_common() {
     Q_PLUGIN_CONF_PATH=etc/quantum/plugins/linuxbridge
     Q_PLUGIN_CONF_FILENAME=linuxbridge_conf.ini
@@ -25,40 +12,9 @@
     Q_PLUGIN_CLASS="quantum.plugins.linuxbridge.lb_quantum_plugin.LinuxBridgePluginV2"
 }
 
-function quantum_plugin_configure_debug_command() {
-    iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge
-}
-
-function quantum_plugin_configure_dhcp_agent() {
-    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
-}
-
-function quantum_plugin_configure_l3_agent() {
-    iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
-    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
-}
-
-function quantum_plugin_configure_plugin_agent() {
-    # Setup physical network interface mappings.  Override
-    # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
-    # complex physical network configurations.
-    if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
-        LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
-    fi
-    if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE LINUX_BRIDGE physical_interface_mappings $LB_INTERFACE_MAPPINGS
-    fi
-    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
-    else
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
-    fi
-    AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
-}
-
 function quantum_plugin_configure_service() {
     if [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE VLANS tenant_network_type vlan
+        iniset /$Q_PLUGIN_CONF_FILE vlans tenant_network_type vlan
     else
         echo "WARNING - The linuxbridge plugin is using local tenant networks, with no connectivity between hosts."
     fi
@@ -72,13 +28,21 @@
         fi
     fi
     if [[ "$LB_VLAN_RANGES" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE VLANS network_vlan_ranges $LB_VLAN_RANGES
+        iniset /$Q_PLUGIN_CONF_FILE vlans network_vlan_ranges $LB_VLAN_RANGES
     fi
-}
+    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+    else
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver
+    fi
 
-function quantum_plugin_setup_interface_driver() {
-    local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+    # Define extra "LINUX_BRIDGE" configuration options when q-svc is configured by defining
+    # the array ``Q_SRV_EXTRA_OPTS``.
+    # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)``
+    for I in "${Q_SRV_EXTRA_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE linux_bridge ${I/=/ }
+    done
 }
 
 function has_quantum_plugin_security_group() {
diff --git a/lib/quantum_plugins/linuxbridge_agent b/lib/quantum_plugins/linuxbridge_agent
new file mode 100644
index 0000000..b3ca8b1
--- /dev/null
+++ b/lib/quantum_plugins/linuxbridge_agent
@@ -0,0 +1,76 @@
+# Quantum Linux Bridge L2 agent
+# -----------------------------
+
+# Save trace setting
+PLUGIN_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function is_quantum_ovs_base_plugin() {
+    # linuxbridge doesn't use OVS
+    return 1
+}
+
+function quantum_plugin_create_nova_conf() {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+}
+
+function quantum_plugin_install_agent_packages() {
+    install_package bridge-utils
+}
+
+function quantum_plugin_configure_debug_command() {
+    iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge
+}
+
+function quantum_plugin_configure_dhcp_agent() {
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function quantum_plugin_configure_l3_agent() {
+    iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function quantum_plugin_configure_plugin_agent() {
+    # Setup physical network interface mappings.  Override
+    # ``LB_VLAN_RANGES`` and ``LB_INTERFACE_MAPPINGS`` in ``localrc`` for more
+    # complex physical network configurations.
+    if [[ "$LB_INTERFACE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
+        LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
+    fi
+    if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS
+    fi
+    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.IptablesFirewallDriver
+    else
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver
+    fi
+    AGENT_BINARY="$QUANTUM_DIR/bin/quantum-linuxbridge-agent"
+    # Define extra "AGENT" configuration options when q-agt is configured by defining
+    # the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ }
+    done
+    # Define extra "LINUX_BRIDGE" configuration options when q-agt is configured by defining
+    # the array ``Q_AGENT_EXTRA_SRV_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE linux_bridge ${I/=/ }
+    done
+}
+
+function quantum_plugin_setup_interface_driver() {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+}
+
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
+# Restore xtrace
+$PLUGIN_XTRACE
diff --git a/lib/quantum_plugins/ml2 b/lib/quantum_plugins/ml2
new file mode 100644
index 0000000..ae8fe6c
--- /dev/null
+++ b/lib/quantum_plugins/ml2
@@ -0,0 +1,62 @@
+# Quantum Modular Layer 2 plugin
+# ------------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Default openvswitch L2 agent
+Q_AGENT=${Q_AGENT:-openvswitch}
+source $TOP_DIR/lib/quantum_plugins/${Q_AGENT}_agent
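+# For example (sketch): setting Q_AGENT=linuxbridge in localrc would source
+# lib/quantum_plugins/linuxbridge_agent here instead of the default
+# openvswitch agent.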
+
+function quantum_plugin_configure_common() {
+    Q_PLUGIN_CONF_PATH=etc/quantum/plugins/ml2
+    Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
+    Q_DB_NAME="quantum_ml2"
+    Q_PLUGIN_CLASS="quantum.plugins.ml2.plugin.Ml2Plugin"
+}
+
+function quantum_plugin_configure_service() {
+    if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre
+        iniset /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges $TENANT_TUNNEL_RANGES
+    elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types vlan
+    else
+        echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts."
+    fi
+
+    # Override ``ML2_VLAN_RANGES`` and any needed agent configuration
+    # variables in ``localrc`` for more complex physical network
+    # configurations.
+    if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+        ML2_VLAN_RANGES=$PHYSICAL_NETWORK
+        if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+            ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE
+        fi
+    fi
+    if [[ "$ML2_VLAN_RANGES" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ml2_type_vlan network_vlan_ranges $ML2_VLAN_RANGES
+    fi
+
+    # REVISIT(rkukura): Setting firewall_driver here for
+    # quantum.agent.securitygroups_rpc.is_firewall_enabled() which is
+    # used in the server, in case no L2 agent is configured on the
+    # server's node. If an L2 agent is configured, this will get
+    # overridden with the correct driver. The ml2 plugin should
+    # instead use its own config variable to indicate whether security
+    # groups is enabled, and that will need to be set here instead.
+    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.not.a.real.FirewallDriver
+    else
+        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+    fi
+
+}
+
+function has_quantum_plugin_security_group() {
+    return 0
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/quantum_plugins/nec b/lib/quantum_plugins/nec
index f61f50b..69bbe0e 100644
--- a/lib/quantum_plugins/nec
+++ b/lib/quantum_plugins/nec
@@ -17,8 +17,6 @@
 OFC_RETRY_MAX=${OFC_RETRY_MAX:-0}
 OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1}
 
-OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-
 # Main logic
 # ---------------------------
 
@@ -79,11 +77,13 @@
 
 function quantum_plugin_configure_service() {
     iniset $QUANTUM_CONF DEFAULT api_extensions_path quantum/plugins/nec/extensions/
-    iniset /$Q_PLUGIN_CONF_FILE OFC host $OFC_API_HOST
-    iniset /$Q_PLUGIN_CONF_FILE OFC port $OFC_API_PORT
-    iniset /$Q_PLUGIN_CONF_FILE OFC driver $OFC_DRIVER
-    iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_max OFC_RETRY_MAX
-    iniset /$Q_PLUGIN_CONF_FILE OFC api_retry_interval OFC_RETRY_INTERVAL
+    iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST
+    iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT
+    iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER
+    iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max $OFC_RETRY_MAX
+    iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval $OFC_RETRY_INTERVAL
+
+    _quantum_ovs_base_configure_firewall_driver
 }
 
 function quantum_plugin_setup_interface_driver() {
@@ -118,5 +118,9 @@
     return 0
 }
 
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index 6eefb02..d4b3e51 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -8,16 +8,12 @@
 source $TOP_DIR/lib/quantum_plugins/ovs_base
 
 function setup_integration_bridge() {
-    OVS_BRIDGE=${OVS_BRIDGE:-br-int}
     _quantum_ovs_base_setup_bridge $OVS_BRIDGE
     # Set manager to NVP controller (1st of list)
     if [[ "$NVP_CONTROLLERS" != "" ]]; then
         # Get the first controller
         controllers=(${NVP_CONTROLLERS//,/ })
         OVS_MGR_IP=${controllers[0]}
-    elif [[ "$NVP_CONTROLLER_CONNECTION" != "" ]]; then
-        conn=(${NVP_CONTROLLER_CONNECTION//\:/ })
-        OVS_MGR_IP=${conn[0]}
     else
         die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
     fi
@@ -38,8 +34,8 @@
 }
 
 function quantum_plugin_install_agent_packages() {
-    # Nicira Plugin does not run q-agt
-    :
+    # Nicira Plugin does not run q-agt, but it currently needs dhcp and metadata agents
+    _quantum_ovs_base_install_agent_packages
 }
 
 function quantum_plugin_configure_common() {
@@ -50,7 +46,7 @@
 }
 
 function quantum_plugin_configure_debug_command() {
-    :
+    sudo ovs-vsctl --no-wait -- --may-exist add-br $PUBLIC_BRIDGE
 }
 
 function quantum_plugin_configure_dhcp_agent() {
@@ -72,67 +68,55 @@
 
 function quantum_plugin_configure_service() {
     if [[ "$MAX_LP_PER_BRIDGED_LS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE NVP max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
+        iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_bridged_ls $MAX_LP_PER_BRIDGED_LS
     fi
     if [[ "$MAX_LP_PER_OVERLAY_LS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE NVP max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
+        iniset /$Q_PLUGIN_CONF_FILE nvp max_lp_per_overlay_ls $MAX_LP_PER_OVERLAY_LS
     fi
     if [[ "$FAILOVER_TIME" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE NVP failover_time $FAILOVER_TIME
+        iniset /$Q_PLUGIN_CONF_FILE nvp failover_time $FAILOVER_TIME
     fi
     if [[ "$CONCURRENT_CONNECTIONS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE NVP concurrent_connections $CONCURRENT_CONNECTIONS
+        iniset /$Q_PLUGIN_CONF_FILE nvp concurrent_connections $CONCURRENT_CONNECTIONS
     fi
 
-    if [[ "$DEFAULT_CLUSTER" != "" ]]; then
-        # Make name shorter for sake of readability
-        DC=$DEFAULT_CLUSTER
-        if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID
-        else
-            die $LINENO "The nicira plugin won't work without a default transport zone."
-        fi
-        if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
-            Q_L3_ENABLED=True
-            Q_L3_ROUTER_PER_TENANT=True
-            iniset /$Q_PLUGIN_CONF_FILE NVP enable_metadata_access_network True
-        else
-            echo "WARNING - No l3 gw service enabled.  You will not be able to use the L3 API extension"
-        fi
-        if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
-        fi
-        # NVP_CONTROLLERS must be a comma separated string
-        if [[ "$NVP_CONTROLLERS" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controllers $NVP_CONTROLLERS
-        elif [[ "$NVP_CONTROLLER_CONNECTION" != "" ]]; then
-            # Only 1 controller can be specified in this case
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION
-        else
-            die $LINENO "The nicira plugin needs at least an NVP controller."
-        fi
-        if [[ "$NVP_USER" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER
-        fi
-        if [[ "$NVP_PASSWORD" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_password $NVP_PASSWORD
-        fi
-        if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" req_timeout $NVP_REQ_TIMEOUT
-        fi
-        if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" http_timeout $NVP_HTTP_TIMEOUT
-        fi
-        if [[ "$NVP_RETRIES" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" retries $NVP_RETRIES
-        fi
-        if [[ "$NVP_REDIRECTS" != "" ]]; then
-            iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" redirects $NVP_REDIRECTS
-        fi
+    if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_tz_uuid $DEFAULT_TZ_UUID
     else
-        echo "ERROR - Default cluster not configured. Quantum will not start"
-        exit 1
+        die $LINENO "The nicira plugin won't work without a default transport zone."
+    fi
+    if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
+        Q_L3_ENABLED=True
+        Q_L3_ROUTER_PER_TENANT=True
+        iniset /$Q_PLUGIN_CONF_FILE nvp enable_metadata_access_network True
+    fi
+    if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
+    fi
+    # NVP_CONTROLLERS must be a comma separated string
+    if [[ "$NVP_CONTROLLERS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_controllers $NVP_CONTROLLERS
+    else
+        die $LINENO "The nicira plugin needs at least an NVP controller."
+    fi
+    if [[ "$NVP_USER" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_user $NVP_USER
+    fi
+    if [[ "$NVP_PASSWORD" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT nvp_password $NVP_PASSWORD
+    fi
+    if [[ "$NVP_REQ_TIMEOUT" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT req_timeout $NVP_REQ_TIMEOUT
+    fi
+    if [[ "$NVP_HTTP_TIMEOUT" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT http_timeout $NVP_HTTP_TIMEOUT
+    fi
+    if [[ "$NVP_RETRIES" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT retries $NVP_RETRIES
+    fi
+    if [[ "$NVP_REDIRECTS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE DEFAULT redirects $NVP_REDIRECTS
     fi
 }
 
@@ -146,5 +130,9 @@
     return 0
 }
 
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-dhcp && return 0
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index ab16483..4aac9f8 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -1,23 +1,11 @@
-# Quantum Open vSwtich plugin
+# Quantum Open vSwitch plugin
 # ---------------------------
 
 # Save trace setting
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-source $TOP_DIR/lib/quantum_plugins/ovs_base
-
-function quantum_plugin_create_nova_conf() {
-    _quantum_ovs_base_configure_nova_vif_driver
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
-        iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
-    fi
-}
-
-function quantum_plugin_install_agent_packages() {
-    _quantum_ovs_base_install_agent_packages
-}
+source $TOP_DIR/lib/quantum_plugins/openvswitch_agent
 
 function quantum_plugin_configure_common() {
     Q_PLUGIN_CONF_PATH=etc/quantum/plugins/openvswitch
@@ -26,93 +14,12 @@
     Q_PLUGIN_CLASS="quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2"
 }
 
-function quantum_plugin_configure_debug_command() {
-    _quantum_ovs_base_configure_debug_command
-}
-
-function quantum_plugin_configure_dhcp_agent() {
-    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
-}
-
-function quantum_plugin_configure_l3_agent() {
-    _quantum_ovs_base_configure_l3_agent
-    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
-}
-
-function quantum_plugin_configure_plugin_agent() {
-    # Setup integration bridge
-    OVS_BRIDGE=${OVS_BRIDGE:-br-int}
-    _quantum_ovs_base_setup_bridge $OVS_BRIDGE
-    _quantum_ovs_base_configure_firewall_driver
-
-    # Setup agent for tunneling
-    if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
-        # Verify tunnels are supported
-        # REVISIT - also check kernel module support for GRE and patch ports
-        OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
-        if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
-            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
-        fi
-        iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
-        iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
-    fi
-
-    # Setup physical network bridge mappings.  Override
-    # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
-    # complex physical network configurations.
-    if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
-        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
-
-        # Configure bridge manually with physical interface as port for multi-node
-        sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
-    fi
-    if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings $OVS_BRIDGE_MAPPINGS
-    fi
-    AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
-
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
-        # Nova will always be installed along with quantum for a domU
-        # devstack install, so it should be safe to rely on nova.conf
-        # for xenapi configuration.
-        Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $NOVA_CONF"
-        # Under XS/XCP, the ovs agent needs to target the dom0
-        # integration bridge.  This is enabled by using a root wrapper
-        # that executes commands on dom0 via a XenAPI plugin.
-        iniset /$Q_PLUGIN_CONF_FILE AGENT root_helper "$Q_RR_DOM0_COMMAND"
-
-        # FLAT_NETWORK_BRIDGE is the dom0 integration bridge.  To
-        # ensure the bridge lacks direct connectivity, set
-        # VM_VLAN=-1;VM_DEV=invalid in localrc
-        iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $FLAT_NETWORK_BRIDGE
-
-        # The ovs agent needs to ensure that the ports associated with
-        # a given network share the same local vlan tag.  On
-        # single-node XS/XCP, this requires monitoring both the dom0
-        # bridge, where VM's are attached, and the domU bridge, where
-        # dhcp servers are attached.
-        if is_service_enabled q-dhcp; then
-            iniset /$Q_PLUGIN_CONF_FILE OVS domu_integration_bridge $OVS_BRIDGE
-            # DomU will use the regular rootwrap
-            iniset /$Q_PLUGIN_CONF_FILE AGENT domu_root_helper "$Q_RR_COMMAND"
-            # Plug the vm interface into the domU integration bridge.
-            sudo ip addr flush dev $GUEST_INTERFACE_DEFAULT
-            sudo ip link set $OVS_BRIDGE up
-            # Assign the VM IP only if it has been set explicitly
-            if [[ "$VM_IP" != "" ]]; then
-                sudo ip addr add $VM_IP dev $OVS_BRIDGE
-            fi
-            sudo ovs-vsctl add-port $OVS_BRIDGE $GUEST_INTERFACE_DEFAULT
-        fi
-    fi
-}
-
 function quantum_plugin_configure_service() {
     if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type gre
-        iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges $TENANT_TUNNEL_RANGES
+        iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type gre
+        iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_id_ranges $TENANT_TUNNEL_RANGES
     elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE OVS tenant_network_type vlan
+        iniset /$Q_PLUGIN_CONF_FILE ovs tenant_network_type vlan
     else
         echo "WARNING - The openvswitch plugin is using local tenant networks, with no connectivity between hosts."
     fi
@@ -126,18 +33,23 @@
         fi
     fi
     if [[ "$OVS_VLAN_RANGES" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges $OVS_VLAN_RANGES
+        iniset /$Q_PLUGIN_CONF_FILE ovs network_vlan_ranges $OVS_VLAN_RANGES
     fi
 
     # Enable tunnel networks if selected
     if [[ $OVS_ENABLE_TUNNELING = "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
+        iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True
     fi
-}
 
-function quantum_plugin_setup_interface_driver() {
-    local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+    _quantum_ovs_base_configure_firewall_driver
+
+    # Define extra "OVS" configuration options when q-svc is configured by defining
+    # the array ``Q_SRV_EXTRA_OPTS``.
+    # For Example: ``Q_SRV_EXTRA_OPTS=(foo=true bar=2)``
+    for I in "${Q_SRV_EXTRA_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE ovs ${I/=/ }
+    done
 }
 
 function has_quantum_plugin_security_group() {
diff --git a/lib/quantum_plugins/openvswitch_agent b/lib/quantum_plugins/openvswitch_agent
new file mode 100644
index 0000000..608c3ea
--- /dev/null
+++ b/lib/quantum_plugins/openvswitch_agent
@@ -0,0 +1,131 @@
+# Quantum Open vSwitch L2 agent
+# -----------------------------
+
+# Save trace setting
+PLUGIN_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/quantum_plugins/ovs_base
+
+function quantum_plugin_create_nova_conf() {
+    _quantum_ovs_base_configure_nova_vif_driver
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
+        iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $XEN_INTEGRATION_BRIDGE
+        # Disable nova's firewall so that it does not conflict with quantum
+        iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
+    fi
+}
+
+function quantum_plugin_install_agent_packages() {
+    _quantum_ovs_base_install_agent_packages
+}
+
+function quantum_plugin_configure_debug_command() {
+    _quantum_ovs_base_configure_debug_command
+}
+
+function quantum_plugin_configure_dhcp_agent() {
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function quantum_plugin_configure_l3_agent() {
+    _quantum_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function quantum_plugin_configure_plugin_agent() {
+    # Setup integration bridge
+    _quantum_ovs_base_setup_bridge $OVS_BRIDGE
+    _quantum_ovs_base_configure_firewall_driver
+
+    # Setup agent for tunneling
+    if [[ "$OVS_ENABLE_TUNNELING" = "True" ]]; then
+        # Verify tunnels are supported
+        # REVISIT - also check kernel module support for GRE and patch ports
+        OVS_VERSION=`ovs-vsctl --version | head -n 1 | grep -E -o "[0-9]+\.[0-9]+"`
+        if [ `vercmp_numbers "$OVS_VERSION" "1.4"` -lt "0" ] && ! is_service_enabled q-svc ; then
+            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
+        fi
+        iniset /$Q_PLUGIN_CONF_FILE ovs enable_tunneling True
+        iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
+    fi
+
+    # Setup physical network bridge mappings.  Override
+    # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
+    # complex physical network configurations.
+    if [[ "$OVS_BRIDGE_MAPPINGS" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+
+        # Configure bridge manually with physical interface as port for multi-node
+        sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE
+    fi
+    if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS
+    fi
+    AGENT_BINARY="$QUANTUM_DIR/bin/quantum-openvswitch-agent"
+
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        # Make a copy of our config for domU
+        sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
+
+        # Deal with Dom0's L2 Agent:
+        Q_RR_DOM0_COMMAND="$QUANTUM_DIR/bin/quantum-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
+
+        # For now, duplicate the xen configuration already found in nova.conf
+        iniset $Q_RR_CONF_FILE xenapi xenapi_connection_url "$XENAPI_CONNECTION_URL"
+        iniset $Q_RR_CONF_FILE xenapi xenapi_connection_username "$XENAPI_USER"
+        iniset $Q_RR_CONF_FILE xenapi xenapi_connection_password "$XENAPI_PASSWORD"
+
+        # Under XS/XCP, the ovs agent needs to target the dom0
+        # integration bridge.  This is enabled by using a root wrapper
+        # that executes commands on dom0 via a XenAPI plugin.
+        iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND"
+
+        # Set "physical" mapping
+        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
+
+        # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0
+        iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE
+
+        # Set up domU's L2 agent:
+
+        # Create a bridge "br-$GUEST_INTERFACE_DEFAULT"
+        sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT"
+        # Add $GUEST_INTERFACE_DEFAULT to that bridge
+        sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
+
+        # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+        # Set integration bridge to domU's
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE
+        # Set root wrap
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND"
+    fi
+    # Define extra "AGENT" configuration options when q-agt is configured by defining
+    # defining the array ``Q_AGENT_EXTRA_AGENT_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_AGENT_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE agent ${I/=/ }
+    done
+    # Define extra "OVS" configuration options when q-agt is configured by defining
+    # defining the array ``Q_AGENT_EXTRA_SRV_OPTS``.
+    # For Example: ``Q_AGENT_EXTRA_SRV_OPTS=(foo=true bar=2)``
+    for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset /$Q_PLUGIN_CONF_FILE ovs ${I/=/ }
+    done
+}
+
+function quantum_plugin_setup_interface_driver() {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+}
+
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
+# Restore xtrace
+$PLUGIN_XTRACE
diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base
index 2ada0db..646ff4a 100644
--- a/lib/quantum_plugins/ovs_base
+++ b/lib/quantum_plugins/ovs_base
@@ -5,6 +5,9 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+OVS_BRIDGE=${OVS_BRIDGE:-br-int}
+PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+
 function is_quantum_ovs_base_plugin() {
     # Yes, we use OVS.
     return 0
@@ -17,6 +20,18 @@
     sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
 
+function quantum_ovs_base_cleanup() {
+    # remove all OVS ports that look like Quantum created ports
+    for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
+        sudo ovs-vsctl del-port ${port}
+    done
+
+    # remove all OVS bridges created by Quantum
+    for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do
+        sudo ovs-vsctl del-br ${bridge}
+    done
+}
+
 function _quantum_ovs_base_install_agent_packages() {
     local kernel_version
     # Install deps
@@ -41,9 +56,9 @@
 
 function _quantum_ovs_base_configure_firewall_driver() {
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
     else
-        iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver quantum.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver quantum.agent.firewall.NoopFirewallDriver
     fi
 }
 
diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid
index b49aa92..dde18c8 100644
--- a/lib/quantum_plugins/plumgrid
+++ b/lib/quantum_plugins/plumgrid
@@ -25,13 +25,18 @@
 }
 
 function quantum_plugin_configure_service() {
-    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server localhost
-    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port 7766
+    PLUMGRID_NOS_IP=${PLUMGRID_NOS_IP:-localhost}
+    PLUMGRID_NOS_PORT=${PLUMGRID_NOS_PORT:-7766}
+    iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server $PLUMGRID_NOS_IP
+    iniset /$Q_PLUGIN_CONF_FILE plumgridnos nos_server_port $PLUMGRID_NOS_PORT
 }
 
 function quantum_plugin_configure_debug_command() {
     :
 }
 
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index 1139232..53c4f41 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -45,19 +45,20 @@
 
 function quantum_plugin_configure_plugin_agent() {
     # Set up integration bridge
-    OVS_BRIDGE=${OVS_BRIDGE:-br-int}
     _quantum_ovs_base_setup_bridge $OVS_BRIDGE
     if [ -n "$RYU_INTERNAL_INTERFACE" ]; then
         sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $RYU_INTERNAL_INTERFACE
     fi
-    iniset /$Q_PLUGIN_CONF_FILE OVS integration_bridge $OVS_BRIDGE
+    iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE
     AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/ryu/agent/ryu_quantum_agent.py"
 
     _quantum_ovs_base_configure_firewall_driver
 }
 
 function quantum_plugin_configure_service() {
-    iniset /$Q_PLUGIN_CONF_FILE OVS openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+    iniset /$Q_PLUGIN_CONF_FILE ovs openflow_rest_api $RYU_API_HOST:$RYU_API_PORT
+
+    _quantum_ovs_base_configure_firewall_driver
 }
 
 function quantum_plugin_setup_interface_driver() {
@@ -71,5 +72,9 @@
     return 0
 }
 
+function quantum_plugin_check_adv_test_requirements() {
+    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/quantum_plugins/services/agent_loadbalancer b/lib/quantum_plugins/services/loadbalancer
similarity index 66%
rename from lib/quantum_plugins/services/agent_loadbalancer
rename to lib/quantum_plugins/services/loadbalancer
index b6528b0..ac8501f 100644
--- a/lib/quantum_plugins/services/agent_loadbalancer
+++ b/lib/quantum_plugins/services/loadbalancer
@@ -7,7 +7,7 @@
 
 
 AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent"
-AGENT_LBAAS_PLUGIN=quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin
+LBAAS_PLUGIN=quantum.services.loadbalancer.plugin.LoadBalancerPlugin
 
 function quantum_agent_lbaas_install_agent_packages() {
     if is_ubuntu || is_fedora; then
@@ -20,20 +20,25 @@
 
 function quantum_agent_lbaas_configure_common() {
     if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
-        Q_SERVICE_PLUGIN_CLASSES=$AGENT_LBAAS_PLUGIN
+        Q_SERVICE_PLUGIN_CLASSES=$LBAAS_PLUGIN
     else
-        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$AGENT_LBAAS_PLUGIN"
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$LBAAS_PLUGIN"
     fi
 }
 
 function quantum_agent_lbaas_configure_agent() {
-    LBAAS_AGENT_CONF_PATH=/etc/quantum/plugins/services/agent_loadbalancer
+    LBAAS_AGENT_CONF_PATH=/etc/quantum/services/loadbalancer/haproxy
     mkdir -p $LBAAS_AGENT_CONF_PATH
 
     LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
 
     cp $QUANTUM_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
 
+    iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT use_namespaces $Q_USE_NAMESPACE
+    # ovs_use_veth needs to be set before the plugin configuration
+    # occurs to allow plugins to override the setting.
+    iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
     quantum_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
 
     if is_fedora; then
diff --git a/lib/quantum_thirdparty/bigswitch_floodlight b/lib/quantum_thirdparty/bigswitch_floodlight
index 60e3924..385bd0d 100644
--- a/lib/quantum_thirdparty/bigswitch_floodlight
+++ b/lib/quantum_thirdparty/bigswitch_floodlight
@@ -7,7 +7,6 @@
 
 BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
 BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633}
-OVS_BRIDGE=${OVS_BRIDGE:-br-int}
 
 function configure_bigswitch_floodlight() {
     :
diff --git a/lib/quantum_thirdparty/nicira b/lib/quantum_thirdparty/nicira
new file mode 100644
index 0000000..5a20934
--- /dev/null
+++ b/lib/quantum_thirdparty/nicira
@@ -0,0 +1,52 @@
+# Nicira NVP
+# ----------
+
+# This third-party addition can be used to configure connectivity between a DevStack instance
+# and an NVP Gateway in dev/test environments. In order to use this correctly, the following
+# env variables need to be set (e.g. in your localrc file):
+#
+# * enable_service nicira            --> to execute this third-party addition
+# * PUBLIC_BRIDGE                    --> bridge used for external connectivity, typically br-ex
+# * NVP_GATEWAY_NETWORK_INTERFACE    --> interface used to communicate with the NVP Gateway
+# * NVP_GATEWAY_NETWORK_CIDR         --> CIDR to configure br-ex, e.g. 172.24.4.211/24
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# This is the interface that connects the Devstack instance
+# to a network that allows it to talk to the gateway for
+# testing purposes.
+NVP_GATEWAY_NETWORK_INTERFACE=${NVP_GATEWAY_NETWORK_INTERFACE:-eth2}
+
+function configure_nicira() {
+    :
+}
+
+function init_nicira() {
+    die_if_not_set $LINENO NVP_GATEWAY_NETWORK_CIDR "Please specify a CIDR for the gateway network interface."
+    # Make sure the interface is up, but not configured
+    sudo ifconfig $NVP_GATEWAY_NETWORK_INTERFACE up
+    sudo ip addr flush $NVP_GATEWAY_NETWORK_INTERFACE
+    # Use the PUBLIC Bridge to route traffic to the NVP gateway
+    # NOTE(armando-migliaccio): if running in a nested environment this will work
+    # only with mac learning enabled, portsecurity and security profiles disabled
+    sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_INTERFACE
+    nvp_gw_net_if_mac=$(ip link show $NVP_GATEWAY_NETWORK_INTERFACE | awk '/ether/ {print $2}')
+    sudo ifconfig $PUBLIC_BRIDGE $NVP_GATEWAY_NETWORK_CIDR hw ether $nvp_gw_net_if_mac
+}
+
+function install_nicira() {
+    :
+}
+
+function start_nicira() {
+    :
+}
+
+function stop_nicira() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/rpc_backend b/lib/rpc_backend
index d08cb01..4b04053 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -17,12 +17,26 @@
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-# Entry Points
-# ------------
+
+# Functions
+# ---------
+
 
 # Make sure we only have one rpc backend enabled.
 # Also check the specified rpc backend is available on your platform.
 function check_rpc_backend() {
+    local rpc_needed=1
+    # We rely on the fact that filenames in lib/* match the service names
+    # that can be passed as arguments to is_service_enabled.
+    # We check for a call to iniset_rpc_backend in these files, meaning
+    # the service needs a backend.
+    rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}')
+    for c in ${rpc_candidates}; do
+        if is_service_enabled $c; then
+            rpc_needed=0
+            break
+        fi
+    done
     local rpc_backend_cnt=0
     for svc in qpid zeromq rabbit; do
         is_service_enabled $svc &&
@@ -32,7 +46,7 @@
         echo "ERROR: only one rpc backend may be enabled,"
         echo "       set only one of 'rabbit', 'qpid', 'zeromq'"
         echo "       via ENABLED_SERVICES."
-    elif [ "$rpc_backend_cnt" == 0 ]; then
+    elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then
         echo "ERROR: at least one rpc backend must be enabled,"
         echo "       set one of 'rabbit', 'qpid', 'zeromq'"
         echo "       via ENABLED_SERVICES."
@@ -56,7 +70,7 @@
         fi
     elif is_service_enabled qpid; then
         if is_fedora; then
-            uninstall_package qpid-cpp-server-daemon
+            uninstall_package qpid-cpp-server
         elif is_ubuntu; then
             uninstall_package qpidd
         else
@@ -64,14 +78,18 @@
         fi
     elif is_service_enabled zeromq; then
         if is_fedora; then
-            uninstall_package zeromq python-zmq
+            uninstall_package zeromq python-zmq redis
         elif is_ubuntu; then
-            uninstall_package libzmq1 python-zmq
+            uninstall_package libzmq1 python-zmq redis-server
         elif is_suse; then
-            uninstall_package libzmq1 python-pyzmq
+            uninstall_package libzmq1 python-pyzmq redis
         else
             exit_distro_not_supported "zeromq installation"
         fi
+
+        # Necessary directory for socket location.
+        sudo mkdir -p /var/run/openstack
+        sudo chown $STACK_USER /var/run/openstack
     fi
 }
 
@@ -86,7 +104,13 @@
         rm -f "$tfile"
     elif is_service_enabled qpid; then
         if is_fedora; then
-            install_package qpid-cpp-server-daemon
+            install_package qpid-cpp-server
+            if [[ $DISTRO =~ (rhel6) ]]; then
+                # RHEL6 leaves "auth=yes" in /etc/qpidd.conf; it needs to
+                # be "no" or clients get GSS authentication errors because
+                # qpid attempts to default to that mechanism.
+                sudo sed -i.bak 's/^auth=yes$/auth=no/' /etc/qpidd.conf
+            fi
         elif is_ubuntu; then
             install_package qpidd
             sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf
@@ -95,12 +119,15 @@
             exit_distro_not_supported "qpid installation"
         fi
     elif is_service_enabled zeromq; then
+        # NOTE(ewindisch): Redis is not strictly necessary
+        # but there is a matchmaker driver that works
+        # really well & out of the box for multi-node.
         if is_fedora; then
-            install_package zeromq python-zmq
+            install_package zeromq python-zmq redis
         elif is_ubuntu; then
-            install_package libzmq1 python-zmq
+            install_package libzmq1 python-zmq redis-server
         elif is_suse; then
-            install_package libzmq1 python-pyzmq
+            install_package libzmq1 python-pyzmq redis
         else
             exit_distro_not_supported "zeromq installation"
         fi
@@ -118,6 +145,13 @@
         fi
         # change the rabbit password since the default is "guest"
         sudo rabbitmqctl change_password guest $RABBIT_PASSWORD
+        if is_service_enabled n-cell; then
+            # Add partitioned access for the child cell
+            if [ -z `sudo rabbitmqctl list_vhosts | grep child_cell` ]; then
+                sudo rabbitmqctl add_vhost child_cell
+                sudo rabbitmqctl set_permissions -p child_cell guest ".*" ".*" ".*"
+            fi
+        fi
     elif is_service_enabled qpid; then
         echo_summary "Starting qpid"
         restart_service qpidd
@@ -131,6 +165,11 @@
     local section=$3
     if is_service_enabled zeromq; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq
+        iniset $file $section rpc_zmq_matchmaker \
+            ${package}.openstack.common.rpc.matchmaker_redis.MatchMakerRedis
+        # Set MATCHMAKER_REDIS_HOST if running multi-node.
+        MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1}
+        iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST
     elif is_service_enabled qpid; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
         if is_ubuntu; then
@@ -157,6 +196,7 @@
     ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) )
 }
 
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/swift b/lib/swift
index eb57477..36bca4c 100644
--- a/lib/swift
+++ b/lib/swift
@@ -84,8 +84,8 @@
 ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
 
 
-# Entry Points
-# ------------
+# Functions
+# ---------
 
 # cleanup_swift() - Remove residual data files
 function cleanup_swift() {
@@ -169,7 +169,7 @@
     # configured keystone it will configure swift with it.
     if is_service_enabled key;then
         if is_service_enabled swift3;then
-            swift_pipeline=" s3token swift3 "
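+            # swift3 needs to run before s3token so S3-style requests are
+            # translated before keystone auth is applied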
+            swift_pipeline=" swift3 s3token "
         fi
         swift_pipeline+=" authtoken keystoneauth "
     else
diff --git a/lib/tempest b/lib/tempest
index c1dc3a3..8b4ae0e 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -3,7 +3,7 @@
 
 # Dependencies:
 # ``functions`` file
-# ``lib/nova`` service is runing
+# ``lib/nova`` service is running
 # <list other global vars that are assumed to be defined>
 # - ``DEST``, ``FILES``
 # - ``ADMIN_PASSWORD``
@@ -23,6 +23,7 @@
 # ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
 # ``DEFAULT_INSTANCE_TYPE``
 # ``DEFAULT_INSTANCE_USER``
+# ``CINDER_MULTI_LVM_BACKEND``
 # ``stack.sh`` calls the entry points in this order:
 #
 # install_tempest
@@ -44,14 +45,15 @@
 
 NOVA_SOURCE_DIR=$DEST/nova
 
-BUILD_INTERVAL=3
+BUILD_INTERVAL=1
 BUILD_TIMEOUT=400
 
 
 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1"
 
-# Entry Points
-# ------------
+
+# Functions
+# ---------
 
 # configure_tempest() - Set config files, create data dirs, etc
 function configure_tempest() {
@@ -76,7 +78,7 @@
     # sudo python setup.py deploy
 
     # This function exits on an error so that errors don't compound and you see
-    # only the first error that occured.
+    # only the first error that occurred.
     errexit=$(set +o | grep errexit)
     set -o errexit
 
@@ -165,9 +167,15 @@
         fi
         flavor_ref=${flavors[0]}
         flavor_ref_alt=$flavor_ref
-        if [[ $num_flavors -gt 1 ]]; then
-            flavor_ref_alt=${flavors[1]}
-        fi
+
+        # Ensure flavor_ref and flavor_ref_alt have different values;
+        # some resize-instance tests in tempest depend on this.
+        for f in ${flavors[@]:1}; do
+            if [[ $f -ne $flavor_ref ]]; then
+                flavor_ref_alt=$f
+                break
+            fi
+        done
     fi
 
     if [ "$Q_USE_NAMESPACE" != "False" ]; then
@@ -204,6 +212,13 @@
     iniset $TEMPEST_CONF identity alt_tenant_name $ALT_TENANT_NAME
     iniset $TEMPEST_CONF identity admin_password "$password"
 
+    # Image
+    # For the gate we want to be able to override this variable so we aren't
+    # doing an HTTP fetch over the public internet for this test
+    if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
+        iniset $TEMPEST_CONF image http_image $TEMPEST_HTTP_IMAGE
+    fi
+
     # Compute
     iniset $TEMPEST_CONF compute change_password_available False
     # Note(nati) current tempest don't create network for each tenant
@@ -233,11 +248,10 @@
     iniset $TEMPEST_CONF whitebox path_to_private_key $TEMPEST_DIR/id_rsa
     iniset $TEMPEST_CONF whitebox db_uri $BASE_SQL_CONN/nova
 
-
-    # compute admin
+    # Compute admin
     iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
 
-    # network
+    # Network
     if is_service_enabled quantum; then
         iniset $TEMPEST_CONF network quantum_available "True"
     fi
@@ -246,7 +260,7 @@
     iniset $TEMPEST_CONF network public_network_id "$public_network_id"
     iniset $TEMPEST_CONF network public_router_id "$public_router_id"
 
-    #boto
+    # boto
     iniset $TEMPEST_CONF boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
     iniset $TEMPEST_CONF boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
     iniset $TEMPEST_CONF boto s3_materials_path "$BOTO_MATERIALS_PATH"
@@ -254,6 +268,22 @@
     iniset $TEMPEST_CONF boto http_socket_timeout 30
     iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
 
+    # Orchestration
+    if is_service_enabled heat; then
+        iniset $TEMPEST_CONF orchestration heat_available "True"
+    fi
+
+    # Volume
+    CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
+    if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then
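+        # NOTE: these names are assumed to match the backend names used by
+        # cinder's multi-backend configuration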
+        iniset $TEMPEST_CONF volume multi_backend_enabled "True"
+        iniset $TEMPEST_CONF volume backend1_name "LVM_iSCSI"
+        iniset $TEMPEST_CONF volume backend2_name "LVM_iSCSI_2"
+    fi
+
+    # cli
+    iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR
+
     echo "Created tempest configuration file:"
     cat $TEMPEST_CONF
 
diff --git a/lib/tls b/lib/tls
index fb8f4b9..f7dcffa 100644
--- a/lib/tls
+++ b/lib/tls
@@ -21,6 +21,9 @@
 # start_tls_proxy HOST_IP 5000 localhost 5000
 
 
+# Defaults
+# --------
+
 if is_service_enabled tls-proxy; then
     # TODO(dtroyer): revisit this below after the search for HOST_IP has been done
     TLS_IP=${TLS_IP:-$SERVICE_IP}
@@ -317,6 +320,7 @@
     stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
 }
 
+
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/openrc b/openrc
index 8af2854..adf92b9 100644
--- a/openrc
+++ b/openrc
@@ -37,7 +37,7 @@
 # The introduction of Keystone to the OpenStack ecosystem has standardized the
 # term **tenant** as the entity that owns resources.  In some places references
 # still exist to the original Nova term **project** for this use.  Also,
-# **tenant_name** is prefered to **tenant_id**.
+# **tenant_name** is preferred to **tenant_id**.
 export OS_TENANT_NAME=${OS_TENANT_NAME:-demo}
 
 # In addition to the owning entity (tenant), nova stores the entity performing
@@ -80,7 +80,3 @@
 export NOVA_VERSION=${NOVA_VERSION:-1.1}
 # In the future this will change names:
 export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION}
-
-# set log level to DEBUG (helps debug issues)
-# export KEYSTONECLIENT_DEBUG=1
-# export NOVACLIENT_DEBUG=1
diff --git a/rejoin-stack.sh b/rejoin-stack.sh
index a82c73c..65ba721 100755
--- a/rejoin-stack.sh
+++ b/rejoin-stack.sh
@@ -5,13 +5,19 @@
 
 TOP_DIR=`dirname $0`
 
+# Import common functions in case the localrc (loaded via stackrc)
+# uses them.
+source $TOP_DIR/functions
+
+source $TOP_DIR/stackrc
+
 # if screenrc exists, run screen
 if [[ -e $TOP_DIR/stack-screenrc ]]; then
     if screen -ls | egrep -q "[0-9].stack"; then
         echo "Attaching to already started screen session.."
         exec screen -r stack
     fi
-    exec screen -c $TOP_DIR/stack-screenrc
+    exec screen -c $TOP_DIR/stack-screenrc -S $SCREEN_NAME
 fi
 
 echo "Couldn't find $TOP_DIR/stack-screenrc file; have you run stack.sh yet?"
diff --git a/samples/local.sh b/samples/local.sh
index 5901525..970cbb9 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -1,7 +1,7 @@
 #!/usr/bin/env bash
 
 # Sample ``local.sh`` for user-configurable tasks to run automatically
-# at the sucessful conclusion of ``stack.sh``.
+# at the successful conclusion of ``stack.sh``.
 
 # NOTE: Copy this file to the root ``devstack`` directory for it to
 # work properly.
diff --git a/stack.sh b/stack.sh
index 62309dc..05b53af 100755
--- a/stack.sh
+++ b/stack.sh
@@ -3,7 +3,7 @@
 # ``stack.sh`` is an opinionated OpenStack developer installation.  It
 # installs and configures various combinations of **Ceilometer**, **Cinder**,
 # **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Quantum**
-# and **Swift**
+# and **Swift**.
 
 # This script allows you to specify configuration options of what git
 # repositories to use, enabled services, network configuration and various
@@ -12,12 +12,17 @@
 # developer install.
 
 # To keep this script simple we assume you are running on a recent **Ubuntu**
-# (11.10 Oneiric or newer) or **Fedora** (F16 or newer) machine.  It
-# should work in a VM or physical server.  Additionally we put the list of
-# ``apt`` and ``rpm`` dependencies and other configuration files in this repo.
+# (12.04 Precise or newer) or **Fedora** (F16 or newer) machine.  (It may work
+# on other platforms but support for those platforms is left to those who added
+# them to DevStack.)  It should work in a VM or physical server.  Additionally
+# we maintain a list of ``apt`` and ``rpm`` dependencies and other configuration
+# files in this repo.
 
 # Learn more and get the most recent version at http://devstack.org
 
+# Make sure custom grep options don't get in the way
+unset GREP_OPTIONS
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
@@ -33,15 +38,17 @@
 # Global Settings
 # ===============
 
-# ``stack.sh`` is customizable through setting environment variables.  If you
-# want to override a setting you can set and export it::
+# ``stack.sh`` is customizable by setting environment variables.  Override a
+# default setting via export::
 #
 #     export DATABASE_PASSWORD=anothersecret
 #     ./stack.sh
 #
-# You can also pass options on a single line ``DATABASE_PASSWORD=simple ./stack.sh``
+# or by setting the variable on the command line::
 #
-# Additionally, you can put any local variables into a ``localrc`` file::
+#     DATABASE_PASSWORD=simple ./stack.sh
+#
+# Persistent variables can be placed in a ``localrc`` file::
 #
 #     DATABASE_PASSWORD=anothersecret
 #     DATABASE_USER=hellaroot
@@ -51,8 +58,8 @@
 # be overwritten by a DevStack update.
 #
 # DevStack distributes ``stackrc`` which contains locations for the OpenStack
-# repositories and branches to configure.  ``stackrc`` sources ``localrc`` to
-# allow you to safely override those settings.
+# repositories, branches to configure, and other configuration defaults.
+# ``stackrc`` sources ``localrc`` to allow you to safely override those settings.
 
 if [[ ! -r $TOP_DIR/stackrc ]]; then
     log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
@@ -78,6 +85,19 @@
     rm $TOP_DIR/.stackenv
 fi
 
+# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
+# templates and other useful files in the ``files`` subdirectory
+FILES=$TOP_DIR/files
+if [ ! -d $FILES ]; then
+    log_error $LINENO "missing devstack/files"
+fi
+
+# ``stack.sh`` keeps function libraries here
+# Make sure ``$TOP_DIR/lib`` directory is present
+if [ ! -d $TOP_DIR/lib ]; then
+    log_error $LINENO "missing devstack/lib"
+fi
+
 # Import common services (database, message queue) configuration
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
@@ -89,7 +109,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -100,21 +120,8 @@
 # and the specified rpc backend is available on your platform.
 check_rpc_backend
 
-# ``stack.sh`` keeps function libraries here
-# Make sure ``$TOP_DIR/lib`` directory is present
-if [ ! -d $TOP_DIR/lib ]; then
-    log_error $LINENO "missing devstack/lib"
-fi
-
-# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
-# templates and other useful files in the ``files`` subdirectory
-FILES=$TOP_DIR/files
-if [ ! -d $FILES ]; then
-    log_error $LINENO "missing devstack/files"
-fi
-
-SCREEN_NAME=${SCREEN_NAME:-stack}
 # Check to see if we are already running DevStack
+# Note that this may fail if USE_SCREEN=False
 if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
     echo "You are already running a stack.sh session."
     echo "To rejoin this session type 'screen -x stack'."
@@ -126,6 +133,41 @@
 VERBOSE=$(trueorfalse True $VERBOSE)
 
 
+# Additional repos
+# ================
+
+# Some distros need to add repos beyond the defaults provided by the vendor
+# to pick up required packages.
+
+# The Debian Wheezy official repositories do not contain all required packages,
+# add gplhost repository.
+if [[ "$os_VENDOR" =~ (Debian) ]]; then
+    echo 'deb http://archive.gplhost.com/debian grizzly main' | sudo tee /etc/apt/sources.list.d/gplhost_wheezy-backports.list
+    echo 'deb http://archive.gplhost.com/debian grizzly-backports main' | sudo tee -a /etc/apt/sources.list.d/gplhost_wheezy-backports.list
+    apt_get update
+    apt_get install --force-yes gplhost-archive-keyring
+fi
+
+if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+    # Installing Open vSwitch on RHEL6 requires enabling the RDO repo.
+    RHEL6_RDO_REPO_RPM=${RHEL6_RDO_REPO_RPM:-"http://rdo.fedorapeople.org/openstack/openstack-grizzly/rdo-release-grizzly-3.noarch.rpm"}
+    RHEL6_RDO_REPO_ID=${RHEL6_RDO_REPO_ID:-"openstack-grizzly"}
+    if ! yum repolist enabled $RHEL6_RDO_REPO_ID | grep -q $RHEL6_RDO_REPO_ID; then
+        echo "RDO repo not detected; installing"
+        yum_install $RHEL6_RDO_REPO_RPM || \
+            die $LINENO "Error installing RDO repo, cannot continue"
+    fi
+
+    # RHEL6 requires EPEL for many OpenStack dependencies
+    RHEL6_EPEL_RPM=${RHEL6_EPEL_RPM:-"http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm"}
+    if ! yum repolist enabled epel | grep -q 'epel'; then
+        echo "EPEL not detected; installing"
+        yum_install ${RHEL6_EPEL_RPM} || \
+            die $LINENO "Error installing EPEL repo, cannot continue"
+    fi
+fi
+
+
 # root Access
 # -----------
 
@@ -195,6 +237,9 @@
 sudo mkdir -p $DEST
 sudo chown -R $STACK_USER $DEST
 
+# a basic test for $DEST path permissions (fatal on error unless skipped)
+check_path_perm_sanity ${DEST}
+
 # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
 # Internet access. ``stack.sh`` must have been previously run with Internet
 # access to install prerequisites and fetch repositories.
@@ -221,7 +266,6 @@
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
 FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
-NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
 
 HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
 if [ "$HOST_IP" == "" ]; then
@@ -230,6 +274,8 @@
 
 # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
 SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+
+# Allow the use of an alternate protocol (such as https) for service endpoints
 SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
 
 # Configure services to use syslog instead of writing to individual log files
@@ -241,7 +287,6 @@
 SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
 SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
 
-
 # Use color for logging output (only available if syslog is not used)
 LOG_COLOR=`trueorfalse True $LOG_COLOR`
 
@@ -252,7 +297,7 @@
 # Configure Projects
 # ==================
 
-# Get project function libraries
+# Source project function libraries
 source $TOP_DIR/lib/tls
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
@@ -266,9 +311,9 @@
 source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/ldap
 
-# Set the destination directories for OpenStack projects
-HORIZON_DIR=$DEST/horizon
+# Set the destination directories for other OpenStack projects
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
+PBR_DIR=$DEST/pbr
 
 
 # Interactive Configuration
@@ -538,14 +583,66 @@
     install_database
 fi
 
-if is_service_enabled q-agt; then
+if is_service_enabled quantum; then
     install_quantum_agent_packages
 fi
 
+
+# System-specific preconfigure
+# ============================
+
+if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
+    # Disable selinux to avoid configuring to allow Apache access
+    # to Horizon files or run nodejs (LP#1175444)
+    if selinuxenabled; then
+        sudo setenforce 0
+    fi
+
+    # An old version of ``python-crypto`` (2.0.1) may be installed on a
+    # fresh system via Anaconda and the dependency chain
+    # ``cas`` -> ``python-paramiko`` -> ``python-crypto``.
+    # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info`` file
+    # but leave most of the actual library files behind in ``/usr/lib64/python2.6/Crypto``.
+    # Later ``pip install pycrypto`` will install over the packaged files resulting
+    # in a useless mess of old, rpm-packaged files and pip-installed files.
+    # Remove the package so that ``pip install python-crypto`` installs cleanly.
+    # Note: other RPM packages may require ``python-crypto`` as well.  For example,
+    # RHEL6 does not install ``python-paramiko`` packages.
+    uninstall_package python-crypto
+
+    # A similar situation occurs with ``python-lxml``, which is required by
+    # ``ipa-client``, an auditing package we don't care about.  The
+    # build-dependencies needed for ``pip install lxml`` (``gcc``,
+    # ``libxml2-dev`` and ``libxslt-dev``) are present in ``files/rpms/general``.
+    uninstall_package python-lxml
+
+    # If the ``dbus`` package was installed by DevStack dependencies, the
+    # uuid may not be generated because the service was never started
+    # (PR#598200), causing Nova to fail later complaining that
+    # ``/var/lib/dbus/machine-id`` does not exist.
+    sudo service messagebus restart
+
+    # ``setup.py`` contains a ``setup_requires`` package that is supposed
+    # to be transient.  However, RHEL6 distribute has a bug where
+    # ``setup_requires`` registers entry points that are not cleaned
+    # out properly after the setup-phase resulting in installation failures
+    # (bz#924038).  Pre-install the problem package so the ``setup_requires``
+    # dependency is satisfied and it will not be installed transiently.
+    # Note we do this before the track-depends below.
+    pip_install hgtools
+
+    # RHEL6's version of ``python-nose`` is incompatible with Tempest.
+    # Install nose 1.1 (Tempest-compatible) from EPEL
+    install_package python-nose1.1
+    # Add a symlink for the new nosetests to allow tox for Tempest to
+    # work unmolested.
+    sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests
+fi
+
 TRACK_DEPENDS=${TRACK_DEPENDS:-False}
 
 # Install python packages into a virtualenv so that we can track them
-if [[ $TRACK_DEPENDS = True ]] ; then
+if [[ $TRACK_DEPENDS = True ]]; then
     echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
     install_package python-virtualenv
 
@@ -555,12 +652,15 @@
     $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
 fi
 
-
 # Check Out and Install Source
 # ----------------------------
 
 echo_summary "Installing OpenStack project source"
 
+# Install pbr
+git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
+setup_develop $PBR_DIR
+
 # Install clients libraries
 install_keystoneclient
 install_glanceclient
@@ -585,8 +685,10 @@
     install_swift
     configure_swift
 
+    # swift3 middleware to provide S3 emulation to Swift
     if is_service_enabled swift3; then
-        # swift3 middleware to provide S3 emulation to Swift
+        # replace the nova-objectstore port with the swift port
+        S3_SERVICE_PORT=8080
         git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
         setup_develop $SWIFT3_DIR
     fi
@@ -639,6 +741,7 @@
 if is_service_enabled heat; then
     install_heat
     install_heatclient
+    cleanup_heat
     configure_heat
     configure_heatclient
 fi
@@ -651,9 +754,9 @@
     # don't be naive and add to existing line!
 fi
 
-if [[ $TRACK_DEPENDS = True ]] ; then
+if [[ $TRACK_DEPENDS = True ]]; then
     $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
-    if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff ; then
+    if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
         cat $DEST/requires.diff
     fi
     echo "Ran stack.sh in depend tracking mode, bailing out now"
@@ -679,6 +782,22 @@
 EOF
         sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
     fi
+
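+    # Disable rsyslog rate limiting (burst and interval set to 0) so bursts
+    # of service log messages are not dropped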
+    RSYSLOGCONF="/etc/rsyslog.conf"
+    if [ -f $RSYSLOGCONF ]; then
+        sudo cp -b $RSYSLOGCONF $RSYSLOGCONF.bak
+        if [[ $(grep '$SystemLogRateLimitBurst' $RSYSLOGCONF)  ]]; then
+            sudo sed -i 's/$SystemLogRateLimitBurst\ .*/$SystemLogRateLimitBurst\ 0/' $RSYSLOGCONF
+        else
+            sudo sed -i '$ i $SystemLogRateLimitBurst\ 0' $RSYSLOGCONF
+        fi
+        if [[ $(grep '$SystemLogRateLimitInterval' $RSYSLOGCONF)  ]]; then
+            sudo sed -i 's/$SystemLogRateLimitInterval\ .*/$SystemLogRateLimitInterval\ 0/' $RSYSLOGCONF
+        else
+            sudo sed -i '$ i $SystemLogRateLimitInterval\ 0' $RSYSLOGCONF
+        fi
+    fi
+
     echo_summary "Starting rsyslog"
     restart_service rsyslog
 fi
@@ -711,6 +830,7 @@
         SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
     fi
     screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+    screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
 fi
 
 # Clear screen rc file
@@ -719,14 +839,14 @@
     echo -n > $SCREENRC
 fi
 
-
 # Initialize the directory for service status check
 init_service_check
 
-# Kick off Sysstat
-# ------------------------
-# run sysstat if it is enabled, this has to be early as daemon
-# startup is one of the things to track.
+
+# Sysstat
+# -------
+
+# If enabled, sysstat has to start early to track OpenStack service startup.
 if is_service_enabled sysstat;then
     if [[ -n ${SCREEN_LOGDIR} ]]; then
         screen_it sysstat "sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
@@ -735,6 +855,7 @@
     fi
 fi
 
+
 # Keystone
 # --------
 
@@ -839,7 +960,7 @@
     rm -rf ${NOVA_STATE_PATH}/networks
     sudo mkdir -p ${NOVA_STATE_PATH}/networks
     sudo chown -R ${USER} ${NOVA_STATE_PATH}/networks
-    # Force IP forwarding on, just on case
+    # Force IP forwarding on, just in case
     sudo sysctl -w net.ipv4.ip_forward=1
 fi
 
@@ -880,10 +1001,11 @@
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         echo_summary "Using XenServer virtualization driver"
+        if [ -z "$XENAPI_CONNECTION_URL" ]; then
+            die $LINENO "XENAPI_CONNECTION_URL is not specified"
+        fi
         read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
         iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
-        XENAPI_CONNECTION_URL=${XENAPI_CONNECTION_URL:-"http://169.254.0.1"}
-        XENAPI_USER=${XENAPI_USER:-"root"}
         iniset $NOVA_CONF DEFAULT xenapi_connection_url "$XENAPI_CONNECTION_URL"
         iniset $NOVA_CONF DEFAULT xenapi_connection_username "$XENAPI_USER"
         iniset $NOVA_CONF DEFAULT xenapi_connection_password "$XENAPI_PASSWORD"
@@ -892,6 +1014,7 @@
         XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
         iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
 
+
     # OpenVZ
     # ------
 
@@ -902,6 +1025,7 @@
         LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
         iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
 
+
     # Bare Metal
     # ----------
 
@@ -924,15 +1048,76 @@
            iniset $NOVA_CONF baremetal ${I/=/ }
         done
 
-    # Default
-    # -------
+
+    # PowerVM
+    # -------
+
+    elif [ "$VIRT_DRIVER" = 'powervm' ]; then
+        echo_summary "Using PowerVM driver"
+        POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"}
+        POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"}
+        POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"}
+        POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"}
+        POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"}
+        POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"}
+        iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver
+        iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE
+        iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST
+        iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER
+        iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD
+        iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH
+        iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH
+
+
+    # vSphere API
+    # -----------
+
+    elif [ "$VIRT_DRIVER" = 'vsphere' ]; then
+        echo_summary "Using VMware vCenter driver"
+        iniset $NOVA_CONF DEFAULT compute_driver "vmwareapi.VMwareVCDriver"
+        VMWAREAPI_USER=${VMWAREAPI_USER:-"root"}
+        iniset $NOVA_CONF DEFAULT vmwareapi_host_ip "$VMWAREAPI_IP"
+        iniset $NOVA_CONF DEFAULT vmwareapi_host_username "$VMWAREAPI_USER"
+        iniset $NOVA_CONF DEFAULT vmwareapi_host_password "$VMWAREAPI_PASSWORD"
+        iniset $NOVA_CONF DEFAULT vmwareapi_cluster_name "$VMWAREAPI_CLUSTER"
+
+
+    # fake
+    # ----
+
+    elif [ "$VIRT_DRIVER" = 'fake' ]; then
+        echo_summary "Using fake Virt driver"
+        iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver"
+        # Disable arbitrary limits
+        iniset $NOVA_CONF DEFAULT quota_instances -1
+        iniset $NOVA_CONF DEFAULT quota_cores -1
+        iniset $NOVA_CONF DEFAULT quota_ram -1
+        iniset $NOVA_CONF DEFAULT quota_floating_ips -1
+        iniset $NOVA_CONF DEFAULT quota_fixed_ips -1
+        iniset $NOVA_CONF DEFAULT quota_metadata_items -1
+        iniset $NOVA_CONF DEFAULT quota_injected_files -1
+        iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1
+        iniset $NOVA_CONF DEFAULT quota_security_groups -1
+        iniset $NOVA_CONF DEFAULT quota_security_group_rules -1
+        iniset $NOVA_CONF DEFAULT quota_key_pairs -1
+        iniset $NOVA_CONF DEFAULT scheduler_default_filters "RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter"
+
+
+    # Default libvirt
+    # ---------------
 
     else
         echo_summary "Using libvirt virtualization driver"
         iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
         LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
         iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
+        # Power architecture currently does not support graphical consoles.
+        if is_arch "ppc64"; then
+            iniset $NOVA_CONF DEFAULT vnc_enabled "false"
+        fi
     fi
+
+    init_nova_cells
 fi
 
 # Extra things to prepare nova for baremetal, before nova starts
@@ -993,14 +1178,19 @@
     create_quantum_initial_network
     setup_quantum_debug
 elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
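+    # nova-manage needs to target the child cell's config when cells are enabled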
+    NM_CONF=${NOVA_CONF}
+    if is_service_enabled n-cell; then
+        NM_CONF=${NOVA_CELLS_CONF}
+    fi
+
     # Create a small network
-    $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
+    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
 
     # Create some floating ips
-    $NOVA_BIN_DIR/nova-manage floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
+    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
 
     # Create a second pool
-    $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
+    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
 if is_service_enabled quantum; then
@@ -1111,7 +1301,6 @@
     screen_it baremetal "nova-baremetal-deploy-helper"
 fi
 
-
 # Save some values we generated for later use
 CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
 echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
diff --git a/stackrc b/stackrc
index 7c4fa68..49cf026 100644
--- a/stackrc
+++ b/stackrc
@@ -44,137 +44,162 @@
 # be disabled for automated testing by setting this value to False.
 USE_SCREEN=True
 
+# allow local overrides of env variables, including repo config
+if [ -f $RC_DIR/localrc ]; then
+    source $RC_DIR/localrc
+fi
+
+
 # Repositories
 # ------------
 
 # Base GIT Repo URL
 # Another option is http://review.openstack.org/p
-GIT_BASE=https://github.com
+GIT_BASE=${GIT_BASE:-https://github.com}
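+# Each *_REPO/*_BRANCH below follows the same override pattern, so a localrc
+# can point an individual project at a fork or branch, for example
+# (hypothetical values):
+#     NOVA_REPO=${GIT_BASE}/myorg/nova.git
+#     NOVA_BRANCH=my-feature-branch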
 
 # metering service
-CEILOMETER_REPO=${GIT_BASE}/openstack/ceilometer.git
-CEILOMETER_BRANCH=master
+CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
+CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master}
 
 # ceilometer client library
-CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient.git
-CEILOMETERCLIENT_BRANCH=master
+CEILOMETERCLIENT_REPO=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git}
+CEILOMETERCLIENT_BRANCH=${CEILOMETERCLIENT_BRANCH:-master}
 
 # volume service
-CINDER_REPO=${GIT_BASE}/openstack/cinder.git
-CINDER_BRANCH=master
+CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git}
+CINDER_BRANCH=${CINDER_BRANCH:-master}
 
 # volume client
-CINDERCLIENT_REPO=${GIT_BASE}/openstack/python-cinderclient.git
-CINDERCLIENT_BRANCH=master
-
-# compute service
-NOVA_REPO=${GIT_BASE}/openstack/nova.git
-NOVA_BRANCH=master
-
-# storage service
-SWIFT_REPO=${GIT_BASE}/openstack/swift.git
-SWIFT_BRANCH=master
-SWIFT3_REPO=${GIT_BASE}/fujita/swift3.git
-SWIFT3_BRANCH=master
-
-# python swift client library
-SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient.git
-SWIFTCLIENT_BRANCH=master
+CINDERCLIENT_REPO=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git}
+CINDERCLIENT_BRANCH=${CINDERCLIENT_BRANCH:-master}
 
 # image catalog service
-GLANCE_REPO=${GIT_BASE}/openstack/glance.git
-GLANCE_BRANCH=master
+GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
+GLANCE_BRANCH=${GLANCE_BRANCH:-master}
 
 # python glance client library
-GLANCECLIENT_REPO=${GIT_BASE}/openstack/python-glanceclient.git
-GLANCECLIENT_BRANCH=master
-
-# unified auth system (manages accounts/tokens)
-KEYSTONE_REPO=${GIT_BASE}/openstack/keystone.git
-KEYSTONE_BRANCH=master
-
-# a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=${GIT_BASE}/kanaka/noVNC.git
-NOVNC_BRANCH=master
-
-# a websockets/html5 or flash powered SPICE console for vm instances
-SPICE_REPO=http://anongit.freedesktop.org/git/spice/spice-html5.git
-SPICE_BRANCH=master
-
-# django powered web control panel for openstack
-HORIZON_REPO=${GIT_BASE}/openstack/horizon.git
-HORIZON_BRANCH=master
-
-# python client library to nova that horizon (and others) use
-NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git
-NOVACLIENT_BRANCH=master
-
-# consolidated openstack python client
-OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git
-OPENSTACKCLIENT_BRANCH=master
-
-# python keystone client library to nova that horizon uses
-KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient.git
-KEYSTONECLIENT_BRANCH=master
-
-# quantum service
-QUANTUM_REPO=${GIT_BASE}/openstack/quantum.git
-QUANTUM_BRANCH=master
-
-# quantum client
-QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient.git
-QUANTUMCLIENT_BRANCH=master
-
-# Tempest test suite
-TEMPEST_REPO=${GIT_BASE}/openstack/tempest.git
-TEMPEST_BRANCH=master
+GLANCECLIENT_REPO=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
+GLANCECLIENT_BRANCH=${GLANCECLIENT_BRANCH:-master}
 
 # heat service
-HEAT_REPO=${GIT_BASE}/openstack/heat.git
-HEAT_BRANCH=master
+HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
+HEAT_BRANCH=${HEAT_BRANCH:-master}
 
 # python heat client library
-HEATCLIENT_REPO=${GIT_BASE}/openstack/python-heatclient.git
-HEATCLIENT_BRANCH=master
+HEATCLIENT_REPO=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
+HEATCLIENT_BRANCH=${HEATCLIENT_BRANCH:-master}
 
-# ryu service
-RYU_REPO=${GIT_BASE}/osrg/ryu.git
-RYU_BRANCH=master
+# django powered web control panel for openstack
+HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
+HORIZON_BRANCH=${HORIZON_BRANCH:-master}
+
+# unified auth system (manages accounts/tokens)
+KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
+KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
+
+# python keystone client library to nova that horizon uses
+KEYSTONECLIENT_REPO=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git}
+KEYSTONECLIENT_BRANCH=${KEYSTONECLIENT_BRANCH:-master}
+
+# compute service
+NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
+NOVA_BRANCH=${NOVA_BRANCH:-master}
+
+# python client library to nova that horizon (and others) use
+NOVACLIENT_REPO=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git}
+NOVACLIENT_BRANCH=${NOVACLIENT_BRANCH:-master}
+
+# consolidated openstack python client
+OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
+OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
+
+# pbr drives the setuptools configs
+PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
+PBR_BRANCH=${PBR_BRANCH:-master}
+
+# quantum service
+QUANTUM_REPO=${QUANTUM_REPO:-${GIT_BASE}/openstack/quantum.git}
+QUANTUM_BRANCH=${QUANTUM_BRANCH:-master}
+
+# quantum client
+QUANTUMCLIENT_REPO=${QUANTUMCLIENT_REPO:-${GIT_BASE}/openstack/python-quantumclient.git}
+QUANTUMCLIENT_BRANCH=${QUANTUMCLIENT_BRANCH:-master}
+
+# storage service
+SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
+SWIFT_BRANCH=${SWIFT_BRANCH:-master}
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/fujita/swift3.git}
+SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
+
+# python swift client library
+SWIFTCLIENT_REPO=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git}
+SWIFTCLIENT_BRANCH=${SWIFTCLIENT_BRANCH:-master}
+
+# Tempest test suite
+TEMPEST_REPO=${TEMPEST_REPO:-${GIT_BASE}/openstack/tempest.git}
+TEMPEST_BRANCH=${TEMPEST_BRANCH:-master}
+
 
 # diskimage-builder
-BM_IMAGE_BUILD_REPO=${GIT_BASE}/stackforge/diskimage-builder.git
-BM_IMAGE_BUILD_BRANCH=master
+BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/stackforge/diskimage-builder.git}
+BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
 
 # bm_poseur
 # Used to simulate a hardware environment for baremetal
 # Only used if BM_USE_FAKE_ENV is set
-BM_POSEUR_REPO=${GIT_BASE}/tripleo/bm_poseur.git
-BM_POSEUR_BRANCH=master
+BM_POSEUR_REPO=${BM_POSEUR_REPO:-${GIT_BASE}/tripleo/bm_poseur.git}
+BM_POSEUR_BRANCH=${BM_POSEUR_BRANCH:-master}
+
+# a websockets/html5 or flash powered VNC console for vm instances
+NOVNC_REPO=${NOVNC_REPO:-${GIT_BASE}/kanaka/noVNC.git}
+NOVNC_BRANCH=${NOVNC_BRANCH:-master}
+
+# ryu service
+RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git}
+RYU_BRANCH=${RYU_BRANCH:-master}
+
+# a websockets/html5 or flash powered SPICE console for vm instances
+SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
+SPICE_BRANCH=${SPICE_BRANCH:-master}
 
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
-# also install an **LXC** or **OpenVZ** based system.
+# also install an **LXC**, **OpenVZ** or **XenAPI** based system.
 VIRT_DRIVER=${VIRT_DRIVER:-libvirt}
-LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
+case "$VIRT_DRIVER" in
+    libvirt)
+        LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
+        if [[ "$os_VENDOR" =~ (Debian) ]]; then
+            LIBVIRT_GROUP=libvirt
+        else
+            LIBVIRT_GROUP=libvirtd
+        fi
+        ;;
+    xenserver)
+        # Xen config common to nova and quantum
+        XENAPI_USER=${XENAPI_USER:-"root"}
+        ;;
+    *)
+        ;;
+esac
 
-# allow local overrides of env variables
-if [ -f $RC_DIR/localrc ]; then
-    source $RC_DIR/localrc
-fi
 
-# Specify a comma-separated list of UEC images to download and install into glance.
-# supported urls here are:
+# Images
+# ------
+
+# Specify a comma-separated list of images to download and install into glance.
+# Supported URLs here are:
 #  * "uec-style" images:
 #     If the file ends in .tar.gz, uncompress the tarball and and select the first
 #     .img file inside it as the image.  If present, use "*-vmlinuz*" as the kernel
 #     and "*-initrd*" as the ramdisk
-#     example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
+#     example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
 #  * disk image (*.img,*.img.gz)
 #    if file ends in .img, then it will be uploaded and registered as a to
 #    glance as a disk image.  If it ends in .gz, it is uncompressed first.
 #    example:
-#      http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
+#      http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
 #      http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz
 #  * OpenVZ image:
 #    OpenVZ uses its own format of image, and does not support UEC style images
@@ -199,13 +224,16 @@
                 IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
         esac
         ;;
-    *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
+    vsphere)
+        IMAGE_URLS="";;
+    *) # Default to Cirros with kernel, ramdisk and disk image
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
 esac
 
-# 5Gb default volume backing file size
-VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-5130M}
+
+# 10Gb default volume backing file size
+VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M}
 
 # Name of the LVM volume group to use/create for iscsi volumes
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
@@ -222,6 +250,9 @@
 # Compatibility until it's eradicated from CI
 USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
 
+# Set default screen name
+SCREEN_NAME=${SCREEN_NAME:-stack}
+
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 7c4386f..68f11ce 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -42,7 +42,8 @@
 LAST_RUN=$(head -1 $PREREQ_RERUN_MARKER 2>/dev/null || echo "0")
 DELTA=$(($NOW - $LAST_RUN))
 if [[ $DELTA -lt $PREREQ_RERUN_SECONDS && -z "$FORCE_PREREQ" ]]; then
-    echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining); exiting..."
+    echo "Re-run time has not expired ($(($PREREQ_RERUN_SECONDS - $DELTA)) seconds remaining) "
+    echo "and FORCE_PREREQ not set; exiting..."
     return 0
 fi
 
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 1cd45cf..9f39080 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,5 +1,4 @@
-Getting Started With XenServer 5.6 and Devstack
-===============================================
+# Getting Started With XenServer 5.6 and Devstack
 The purpose of the code in this directory it to help developers bootstrap
 a XenServer 5.6 (or greater) + Openstack development environment.  This file gives
 some pointers on how to get started.
@@ -9,81 +8,120 @@
 machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack
 to communicate with the host.
 
-Step 1: Install Xenserver
-------------------------
+The provided localrc helps to build a basic environment.
+The requirements are:
+ - An internet-enabled network with a DHCP server on it
+ - A XenServer box plugged into the same network
+This network will be used as the OpenStack management network. The VM Network
+and the Public Network will not be connected to any physical interfaces, only
+new virtual networks will be created by the `install_os_domU.sh` script.
+
+Steps to follow:
+ - Install XenServer
+ - Download Devstack to XenServer
+ - Customise `localrc`
+ - Start `install_os_domU.sh` script
+
+The `install_os_domU.sh` script will:
+ - Setup XenAPI plugins
+ - Create the named networks, if they don't exist
+ - Install an Ubuntu Virtual Machine, with 4 network interfaces:
+   - eth0 - internal xapi interface
+   - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME`, defaults to
+   `"OpenStack VM Network"`.
+   - eth2 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`,
+     defaults to `xenbr0`, XenServer's bridge associated with the Hypervisor's
+     `eth0`.
+   - eth3 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME`, defaults to
+   `"OpenStack Public Network"`.
+ - After the Ubuntu install process finishes, the network configuration is
+ modified to:
+   - eth0 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`
+   - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME`
+   - eth2 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME`
+   - (eth3) - Optional network interface if quantum is used, to force xapi to
+   create the underlying bridge.
+ - Start devstack inside the created OpenStack VM
+
+## Step 1: Install XenServer
 Install XenServer 5.6+ on a clean box. You can get XenServer by signing
 up for an account on citrix.com, and then visiting:
 https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148
 
 For details on installation, see: http://wiki.openstack.org/XenServer/Install
 
-Here are some sample Xenserver network settings for when you are just
-getting started (Settings like this have been used with a laptop + cheap wifi router):
+The XenServer IP configuration depends on your local network setup. If you are
+using DHCP, make a reservation for XenServer, so its IP address won't change
+over time. Make a note of the XenServer's IP address, as it has to be specified
+in `localrc`. The other option is to manually specify the IP setup for the
+XenServer box. Please make sure that a gateway and a nameserver are configured,
+as `install_os_domU.sh` will connect to github.com to get source-code snapshots.
 
-* XenServer Host IP: 192.168.1.10
-* XenServer Netmask: 255.255.255.0
-* XenServer Gateway: 192.168.1.1
-* XenServer DNS: 192.168.1.1
-
-Step 2: Download devstack
---------------------------
+## Step 2: Download devstack
 On your XenServer host, run the following commands as root:
 
     wget --no-check-certificate https://github.com/openstack-dev/devstack/zipball/master
     unzip -o master -d ./devstack
     cd devstack/*/
 
-Step 3: Configure your localrc inside the devstack directory
-------------------------------------------------------------
+## Step 3: Configure your localrc inside the devstack directory
 Devstack uses a localrc for user-specific configuration.  Note that
-the XENAPI_PASSWORD must be your dom0 root password.
+the `XENAPI_PASSWORD` must be your dom0 root password.
 Of course, use real passwords if this machine is exposed.
 
     cat > ./localrc <<EOF
+    # Passwords
+    # NOTE: these need to be specified, otherwise devstack will try
+    # to prompt for these passwords, blocking the install process.
+
     MYSQL_PASSWORD=my_super_secret
     SERVICE_TOKEN=my_super_secret
     ADMIN_PASSWORD=my_super_secret
-    SERVICE_PASSWORD=$ADMIN_PASSWORD
+    SERVICE_PASSWORD=my_super_secret
     RABBIT_PASSWORD=my_super_secret
-    # This is the password for your guest (for both stack and root users)
+    SWIFT_HASH="66a3d6b56c1f479c8b4e70ab5c2000f5"
+    # This will be the password for the OpenStack VM (both stack and root users)
     GUEST_PASSWORD=my_super_secret
-    # IMPORTANT: The following must be set to your dom0 root password!
-    XENAPI_PASSWORD=my_super_secret
-    # Do not download the usual images yet!
+
+    # XenAPI parameters
+    # NOTE: The following must be set to your XenServer root password!
+
+    XENAPI_PASSWORD=my_xenserver_root_password
+
+    XENAPI_CONNECTION_URL="http://address_of_your_xenserver"
+    VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver
+
+    # Do not download the usual images
     IMAGE_URLS=""
     # Explicitly set virt driver here
     VIRT_DRIVER=xenserver
-    # Explicitly set multi-host
+    # Explicitly enable multi-host
     MULTI_HOST=1
     # Give extra time for boot
     ACTIVE_TIMEOUT=45
-    # Interface on which you would like to access services
-    HOST_IP_IFACE=ethX
-    # First time Ubuntu network install params
-    NETINSTALLIP="dhcp"
-    NAMESERVERS=""
-    NETMASK=""
-    GATEWAY=""
+
+    # Settings for netinstalling Ubuntu
+    UBUNTU_INST_RELEASE=precise
+
+    # NOTE: the value of FLAT_NETWORK_BRIDGE will automatically be determined
+    # by the install_os_domU.sh script.
     EOF
 
-Step 4: Run ./install_os_domU.sh from the tools/xen directory
--------------------------------------------------------------
-cd tools/xen
-./install_os_domU.sh
+## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory
 
-Once this script finishes executing, log into the VM (openstack domU)
-that it installed and tail the run.sh.log file. You will need to wait
-until it run.sh has finished executing.
+    cd tools/xen
+    ./install_os_domU.sh
 
+Once this script finishes executing, log into the VM (openstack domU) that it
+installed and tail the run.sh.log file. You will need to wait until run.sh
+has finished executing.
 
-Step 5: Do cloudy stuff!
---------------------------
+## Step 5: Do cloudy stuff!
 * Play with horizon
 * Play with the CLI
 * Log bugs to devstack and core projects, and submit fixes!
 
-Step 6: Run from snapshot
--------------------------
+## Step 6: Run from snapshot
 If you want to quicky re-run devstack from a clean state,
 using the same settings you used in your previous run,
-you can revert the DomU to the snapshot called "before_first_boot"
+you can revert the DomU to the snapshot called `before_first_boot`.
diff --git a/tools/xen/build_xva.sh b/tools/xen/build_xva.sh
index b0fd003..d0cdf17 100755
--- a/tools/xen/build_xva.sh
+++ b/tools/xen/build_xva.sh
@@ -32,6 +32,41 @@
 #
 GUEST_NAME="$1"
 
+function _print_interface_config() {
+    local device_nr
+    local ip_address
+    local netmask
+
+    device_nr="$1"
+    ip_address="$2"
+    netmask="$3"
+
+    local device
+
+    device="eth${device_nr}"
+
+    echo "auto $device"
+    if [ $ip_address == "dhcp" ]; then
+        echo "iface $device inet dhcp"
+    else
+        echo "iface $device inet static"
+        echo "  address $ip_address"
+        echo "  netmask $netmask"
+    fi
+
+    # Turn off tx checksumming for better performance
+    echo "  post-up ethtool -K $device tx off"
+}
+
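+# Write a complete Debian-style /etc/network/interfaces: loopback plus the
+# public, VM and management interfaces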
+function print_interfaces_config() {
+    echo "auto lo"
+    echo "iface lo inet loopback"
+
+    _print_interface_config $PUB_DEV_NR $PUB_IP $PUB_NETMASK
+    _print_interface_config $VM_DEV_NR $VM_IP $VM_NETMASK
+    _print_interface_config $MGT_DEV_NR $MGT_IP $MGT_NETMASK
+}
+
 #
 # Mount the VDI
 #
@@ -81,42 +116,7 @@
 EOF
 
 # Configure the network
-INTERFACES=$STAGING_DIR/etc/network/interfaces
-TEMPLATES_DIR=$TOP_DIR/templates
-cp $TEMPLATES_DIR/interfaces.in  $INTERFACES
-if [ $VM_IP == "dhcp" ]; then
-    echo 'eth1 on dhcp'
-    sed -e "s,iface eth1 inet static,iface eth1 inet dhcp,g" -i $INTERFACES
-    sed -e '/@ETH1_/d' -i $INTERFACES
-else
-    sed -e "s,@ETH1_IP@,$VM_IP,g" -i $INTERFACES
-    sed -e "s,@ETH1_NETMASK@,$VM_NETMASK,g" -i $INTERFACES
-fi
-
-if [ $MGT_IP == "dhcp" ]; then
-    echo 'eth2 on dhcp'
-    sed -e "s,iface eth2 inet static,iface eth2 inet dhcp,g" -i $INTERFACES
-    sed -e '/@ETH2_/d' -i $INTERFACES
-else
-    sed -e "s,@ETH2_IP@,$MGT_IP,g" -i $INTERFACES
-    sed -e "s,@ETH2_NETMASK@,$MGT_NETMASK,g" -i $INTERFACES
-fi
-
-if [ $PUB_IP == "dhcp" ]; then
-    echo 'eth3 on dhcp'
-    sed -e "s,iface eth3 inet static,iface eth3 inet dhcp,g" -i $INTERFACES
-    sed -e '/@ETH3_/d' -i $INTERFACES
-else
-    sed -e "s,@ETH3_IP@,$PUB_IP,g" -i $INTERFACES
-    sed -e "s,@ETH3_NETMASK@,$PUB_NETMASK,g" -i $INTERFACES
-fi
-
-if [ "$ENABLE_GI" == "true" ]; then
-    cat <<EOF >>$INTERFACES
-auto eth0
-iface eth0 inet dhcp
-EOF
-fi
+print_interfaces_config > $STAGING_DIR/etc/network/interfaces
 
 # Gracefully cp only if source file/dir exists
 function cp_it {
diff --git a/tools/xen/functions b/tools/xen/functions
index 5b4a661..4e37554 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -1,10 +1,8 @@
 #!/bin/bash
 
 function xapi_plugin_location {
-    for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/"
-    do
-        if [ -d $PLUGIN_DIR ]
-        then
+    for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/"; do
+        if [ -d $PLUGIN_DIR ]; then
             echo $PLUGIN_DIR
             return 0
         fi
@@ -17,7 +15,23 @@
 }
 
 function create_directory_for_kernels {
-    mkdir -p "/boot/guest"
+    if [ -d "/boot/guest" ]; then
+        echo "INFO: /boot/guest directory already exists, using that" >&2
+    else
+        local LOCALPATH="$(get_local_sr_path)/os-guest-kernels"
+        mkdir -p $LOCALPATH
+        ln -s $LOCALPATH /boot/guest
+    fi
+}
+
+function create_directory_for_images {
+    if [ -d "/images" ]; then
+        echo "INFO: /images directory already exists, using that" >&2
+    else
+        local LOCALPATH="$(get_local_sr_path)/os-images"
+        mkdir -p $LOCALPATH
+        ln -s $LOCALPATH /images
+    fi
 }
 
 function extract_remote_zipball {
@@ -53,3 +67,186 @@
     rm -rf $EXTRACTED_FILES
     chmod a+x ${XAPI_PLUGIN_DIR}*
 }
+
+function get_local_sr {
+    xe sr-list name-label="Local storage" --minimal
+}
+
+function get_local_sr_path {
+    echo "/var/run/sr-mount/$(get_local_sr)"
+}
+
+function find_ip_by_name() {
+    local guest_name="$1"
+    local interface="$2"
+
+    local period=10
+    local max_tries=10
+    local i=0
+
+    while true; do
+        if [ $i -ge $max_tries ]; then
+            echo "Timeout: ip address for interface $interface of $guest_name"
+            exit 11
+        fi
+
+        ipaddress=$(xe vm-list --minimal \
+                    name-label=$guest_name \
+                    params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p")
+
+        if [ -z "$ipaddress" ]; then
+            sleep $period
+            ((i++))
+        else
+            echo $ipaddress
+            break
+        fi
+    done
+}
+
+function _vm_uuid() {
+    local vm_name_label
+
+    vm_name_label="$1"
+
+    xe vm-list name-label="$vm_name_label" --minimal
+}
+
+function _create_new_network() {
+    local name_label
+    name_label=$1
+
+    xe network-create name-label="$name_label"
+}
+
+function _multiple_networks_with_name() {
+    local name_label
+    name_label=$1
+
+    # A comma indicates multiple matches
+    xe network-list name-label="$name_label" --minimal | grep -q ","
+}
+
+function _network_exists() {
+    local name_label
+    name_label=$1
+
+    ! [ -z $(xe network-list name-label="$name_label" --minimal) ]
+}
+
+function _bridge_exists() {
+    local bridge
+    bridge=$1
+
+    ! [ -z $(xe network-list bridge="$bridge" --minimal) ]
+}
+
+function _network_uuid() {
+    local bridge_or_net_name
+    bridge_or_net_name=$1
+
+    if _bridge_exists "$bridge_or_net_name"; then
+        xe network-list bridge="$bridge_or_net_name" --minimal
+    else
+        xe network-list name-label="$bridge_or_net_name" --minimal
+    fi
+}
+
+function add_interface() {
+    local vm_name_label
+    local bridge_or_network_name
+
+    vm_name_label="$1"
+    bridge_or_network_name="$2"
+    device_number="$3"
+
+    local vm
+    local net
+
+    vm=$(_vm_uuid "$vm_name_label")
+    net=$(_network_uuid "$bridge_or_network_name")
+    xe vif-create network-uuid=$net vm-uuid=$vm device=$device_number
+}
+
+function setup_network() {
+    local bridge_or_net_name
+    bridge_or_net_name=$1
+
+    if ! _bridge_exists "$bridge_or_net_name"; then
+        if _network_exists "$bridge_or_net_name"; then
+            if _multiple_networks_with_name "$bridge_or_net_name"; then
+                cat >&2 << EOF
+ERROR: Multiple networks found with the name-label "$bridge_or_net_name",
+please review your XenServer network configuration / localrc file.
+EOF
+                exit 1
+            fi
+        else
+            _create_new_network "$bridge_or_net_name"
+        fi
+    fi
+}
+
+function bridge_for() {
+    local bridge_or_net_name
+    bridge_or_net_name=$1
+
+    if _bridge_exists "$bridge_or_net_name"; then
+        echo "$bridge_or_net_name"
+    else
+        xe network-list name-label="$bridge_or_net_name" params=bridge --minimal
+    fi
+}
+
+function xenapi_ip_on() {
+    local bridge_or_net_name
+    bridge_or_net_name=$1
+
+    ifconfig $(bridge_for "$bridge_or_net_name") | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"
+}
+
+function xenapi_is_listening_on() {
+    local bridge_or_net_name
+    bridge_or_net_name=$1
+
+    ! [ -z $(xenapi_ip_on "$bridge_or_net_name") ]
+}
+
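+# Succeeds if a shell variable whose name matches the given pattern is
+# defined (compgen -v lists the names of all defined variables).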
+function parameter_is_specified() {
+    local parameter_name
+    parameter_name=$1
+
+    compgen -v | grep "$parameter_name"
+}
+
+function append_kernel_cmdline()
+{
+    local vm_name_label
+    local kernel_args
+
+    vm_name_label="$1"
+    kernel_args="$2"
+
+    local vm
+    local pv_args
+
+    vm=$(_vm_uuid "$vm_name_label")
+    pv_args=$(xe vm-param-get param-name=PV-args uuid=$vm)
+    xe vm-param-set PV-args="$pv_args $kernel_args" uuid=$vm
+}
+
+function destroy_all_vifs_of()
+{
+    local vm_name_label
+
+    vm_name_label="$1"
+
+    local vm
+
+    vm=$(_vm_uuid "$vm_name_label")
+    IFS=,
+    for vif in $(xe vif-list vm-uuid=$vm --minimal); do
+        xe vif-destroy uuid="$vif"
+    done
+    unset IFS
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 7c3b839..d74b1ad 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -1,15 +1,13 @@
 #!/bin/bash
 
-# This script is a level script
-# It must be run on a XenServer or XCP machine
+# This script must be run on a XenServer or XCP machine
 #
 # It creates a DomU VM that runs OpenStack services
 #
 # For more details see: README.md
 
-# Exit on errors
 set -o errexit
-# Echo commands
+set -o nounset
 set -o xtrace
 
 # Abort if localrc is not set
@@ -31,13 +29,12 @@
 # xapi functions
 . $THIS_DIR/functions
 
-
 #
 # Get Settings
 #
 
 # Source params - override xenrc params in your localrc to suit your taste
-source xenrc
+source $THIS_DIR/xenrc
 
 xe_min()
 {
@@ -66,101 +63,39 @@
 fi
 
 create_directory_for_kernels
+create_directory_for_images
 
 #
 # Configure Networking
 #
+setup_network "$VM_BRIDGE_OR_NET_NAME"
+setup_network "$MGT_BRIDGE_OR_NET_NAME"
+setup_network "$PUB_BRIDGE_OR_NET_NAME"
 
-# Helper to create networks
-# Uses echo trickery to return network uuid
-function create_network() {
-    br=$1
-    dev=$2
-    vlan=$3
-    netname=$4
-    if [ -z $br ]
-    then
-        pif=$(xe_min pif-list device=$dev VLAN=$vlan)
-        if [ -z $pif ]
-        then
-            net=$(xe network-create name-label=$netname)
-        else
-            net=$(xe_min network-list  PIF-uuids=$pif)
-        fi
-        echo $net
-        return 0
-    fi
-    if [ ! $(xe_min network-list  params=bridge | grep -w --only-matching $br) ]
-    then
-        echo "Specified bridge $br does not exist"
-        echo "If you wish to use defaults, please keep the bridge name empty"
-        exit 1
-    else
-        net=$(xe_min network-list  bridge=$br)
-        echo $net
-    fi
-}
-
-function errorcheck() {
-    rc=$?
-    if [ $rc -ne 0 ]
-    then
-        exit $rc
-    fi
-}
-
-# Create host, vm, mgmt, pub networks on XenServer
-VM_NET=$(create_network "$VM_BR" "$VM_DEV" "$VM_VLAN" "vmbr")
-errorcheck
-MGT_NET=$(create_network "$MGT_BR" "$MGT_DEV" "$MGT_VLAN" "mgtbr")
-errorcheck
-PUB_NET=$(create_network "$PUB_BR" "$PUB_DEV" "$PUB_VLAN" "pubbr")
-errorcheck
-
-# Helper to create vlans
-function create_vlan() {
-    dev=$1
-    vlan=$2
-    net=$3
-    # VLAN -1 refers to no VLAN (physical network)
-    if [ $vlan -eq -1 ]
-    then
-        return
-    fi
-    if [ -z $(xe_min vlan-list  tag=$vlan) ]
-    then
-        pif=$(xe_min pif-list  network-uuid=$net)
-        # We created a brand new network this time
-        if [ -z $pif ]
-        then
-            pif=$(xe_min pif-list  device=$dev VLAN=-1)
-            xe vlan-create pif-uuid=$pif vlan=$vlan network-uuid=$net
-        else
-            echo "VLAN does not exist but PIF attached to this network"
-            echo "How did we reach here?"
-            exit 1
-        fi
-    fi
-}
-
-# Create vlans for vm and management
-create_vlan $PUB_DEV $PUB_VLAN $PUB_NET
-create_vlan $VM_DEV $VM_VLAN $VM_NET
-create_vlan $MGT_DEV $MGT_VLAN $MGT_NET
-
-# Get final bridge names
-if [ -z $VM_BR ]; then
-    VM_BR=$(xe_min network-list  uuid=$VM_NET params=bridge)
-fi
-if [ -z $MGT_BR ]; then
-    MGT_BR=$(xe_min network-list  uuid=$MGT_NET params=bridge)
-fi
-if [ -z $PUB_BR ]; then
-    PUB_BR=$(xe_min network-list  uuid=$PUB_NET params=bridge)
+# With quantum, one more network is required; it is internal to the
+# hypervisor and used by the VMs
+if is_service_enabled quantum; then
+    setup_network "$XEN_INT_BRIDGE_OR_NET_NAME"
 fi
 
-# dom0 ip, XenAPI is assumed to be listening
-HOST_IP=${HOST_IP:-`ifconfig xenbr0 | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//"`}
+if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
+    cat >&2 << EOF
+ERROR: FLAT_NETWORK_BRIDGE is specified in the localrc file.
+This is considered an error, as its value will be derived from the
+VM_BRIDGE_OR_NET_NAME variable's value.
+EOF
+    exit 1
+fi
+
+if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then
+    cat >&2 << EOF
+ERROR: XenAPI does not have an assigned IP address on the management network.
+Please review your XenServer network configuration / localrc file.
+EOF
+    exit 1
+fi
+
+HOST_IP=$(xenapi_ip_on "$MGT_BRIDGE_OR_NET_NAME")
 
 # Set up ip forwarding, but skip on xcp-xapi
 if [ -a /etc/sysconfig/network ]; then
@@ -253,11 +188,12 @@
             mkdir -p $HTTP_SERVER_LOCATION
         fi
         cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
-        MIRROR=${MIRROR:-""}
-        if [ -n "$MIRROR" ]; then
-            sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \
-                -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
-        fi
+
+        sed \
+            -e "s,\(d-i mirror/http/hostname string\).*,\1 $UBUNTU_INST_HTTP_HOSTNAME,g" \
+            -e "s,\(d-i mirror/http/directory string\).*,\1 $UBUNTU_INST_HTTP_DIRECTORY,g" \
+            -e "s,\(d-i mirror/http/proxy string\).*,\1 $UBUNTU_INST_HTTP_PROXY,g" \
+            -i "${HTTP_SERVER_LOCATION}/devstackubuntupreseed.cfg"
     fi
 
     # Update the template
@@ -265,7 +201,13 @@
 
     # create a new VM with the given template
     # creating the correct VIFs and metadata
-    $THIS_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
+    $THIS_DIR/scripts/install-os-vpx.sh \
+        -t "$UBUNTU_INST_TEMPLATE_NAME" \
+        -v "$VM_BRIDGE_OR_NET_NAME" \
+        -m "$MGT_BRIDGE_OR_NET_NAME" \
+        -p "$PUB_BRIDGE_OR_NET_NAME" \
+        -l "$GUEST_NAME" \
+        -r "$OSDOMU_MEM_MB"
 
     # wait for install to finish
     wait_for_VM_to_halt
@@ -297,69 +239,78 @@
     vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME")
 fi
 
+## Setup network cards
+# Wipe out all
+destroy_all_vifs_of "$GUEST_NAME"
+# Tenant network
+add_interface "$GUEST_NAME" "$VM_BRIDGE_OR_NET_NAME" "$VM_DEV_NR"
+# Management network
+add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "$MGT_DEV_NR"
+# Public network
+add_interface "$GUEST_NAME" "$PUB_BRIDGE_OR_NET_NAME" "$PUB_DEV_NR"
 
 #
 # Inject DevStack inside VM disk
 #
 $THIS_DIR/build_xva.sh "$GUEST_NAME"
 
+# Attach a network interface for the integration network (so that the bridge
+# is created by XenServer). This is required for Quantum. Also pass that as a
+# kernel parameter for DomU
+if is_service_enabled quantum; then
+    add_interface "$GUEST_NAME" "$XEN_INT_BRIDGE_OR_NET_NAME" $XEN_INT_DEV_NR
+
+    XEN_INTEGRATION_BRIDGE=$(bridge_for "$XEN_INT_BRIDGE_OR_NET_NAME")
+    append_kernel_cmdline \
+        "$GUEST_NAME" \
+        "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}"
+fi
+
+FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
+append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
+
+# Add a separate xvdb, if it was requested
+if [[ "0" != "$XEN_XVDB_SIZE_GB" ]]; then
+    vm=$(xe vm-list name-label="$GUEST_NAME" --minimal)
+
+    # Add a new disk
+    localsr=$(get_local_sr)
+    extra_vdi=$(xe vdi-create \
+        name-label=xvdb-added-by-devstack \
+        virtual-size="${XEN_XVDB_SIZE_GB}GiB" \
+        sr-uuid=$localsr type=user)
+    xe vbd-create vm-uuid=$vm vdi-uuid=$extra_vdi device=1
+fi
+
 # create a snapshot before the first boot
 # to allow a quick re-run with the same settings
 xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_FIRST_BOOT"
 
-
 #
 # Run DevStack VM
 #
 xe vm-start vm="$GUEST_NAME"
 
-
-#
-# Find IP and optionally wait for stack.sh to complete
-#
-
-function find_ip_by_name() {
-  local guest_name="$1"
-  local interface="$2"
-  local period=10
-  max_tries=10
-  i=0
-  while true
-  do
-    if [ $i -ge $max_tries ]; then
-      echo "Timed out waiting for devstack ip address"
-      exit 11
-    fi
-
-    devstackip=$(xe vm-list --minimal \
-                 name-label=$guest_name \
-                 params=networks | sed -ne "s,^.*${interface}/ip: \([0-9.]*\).*\$,\1,p")
-    if [ -z "$devstackip" ]
-    then
-      sleep $period
-      ((i++))
-    else
-      echo $devstackip
-      break
-    fi
-  done
-}
-
 function ssh_no_check() {
     ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no "$@"
 }
 
-# Note the XenServer needs to be on the chosen
-# network, so XenServer can access Glance API
-if [ $HOST_IP_IFACE == "eth2" ]; then
-    DOMU_IP=$MGT_IP
+# Get hold of the Management IP of OpenStack VM
+OS_VM_MANAGEMENT_ADDRESS=$MGT_IP
+if [ $OS_VM_MANAGEMENT_ADDRESS == "dhcp" ]; then
+    OS_VM_MANAGEMENT_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR)
+fi
+
+# Get hold of the Service IP of OpenStack VM
+if [ $HOST_IP_IFACE == "eth${MGT_DEV_NR}" ]; then
+    OS_VM_SERVICES_ADDRESS=$MGT_IP
     if [ $MGT_IP == "dhcp" ]; then
-        DOMU_IP=$(find_ip_by_name $GUEST_NAME 2)
+        OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $MGT_DEV_NR)
     fi
 else
-    DOMU_IP=$PUB_IP
+    OS_VM_SERVICES_ADDRESS=$PUB_IP
     if [ $PUB_IP == "dhcp" ]; then
-        DOMU_IP=$(find_ip_by_name $GUEST_NAME 3)
+        OS_VM_SERVICES_ADDRESS=$(find_ip_by_name $GUEST_NAME $PUB_DEV_NR)
     fi
 fi
 
@@ -371,11 +322,11 @@
 
     echo "VM Launched - Waiting for startup script"
     # wait for log to appear
-    while ! ssh_no_check -q stack@$DOMU_IP "[ -e run.sh.log ]"; do
+    while ! ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS "[ -e run.sh.log ]"; do
         sleep 10
     done
     echo -n "Running"
-    while [ `ssh_no_check -q stack@$DOMU_IP pgrep -c run.sh` -ge 1 ]
+    while [ `ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS pgrep -c run.sh` -ge 1 ]
     do
         sleep 10
         echo -n "."
@@ -384,17 +335,17 @@
     set -x
 
     # output the run.sh.log
-    ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log'
+    ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log'
 
     # Fail if the expected text is not found
-    ssh_no_check -q stack@$DOMU_IP 'cat run.sh.log' | grep -q 'stack.sh completed in'
+    ssh_no_check -q stack@$OS_VM_MANAGEMENT_ADDRESS 'cat run.sh.log' | grep -q 'stack.sh completed in'
 
     set +x
     echo "################################################################################"
     echo ""
     echo "All Finished!"
     echo "You can visit the OpenStack Dashboard"
-    echo "at http://$DOMU_IP, and contact other services at the usual ports."
+    echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
 else
     set +x
     echo "################################################################################"
@@ -403,9 +354,9 @@
     echo "Now, you can monitor the progress of the stack.sh installation by "
     echo "tailing /opt/stack/run.sh.log from within your domU."
     echo ""
-    echo "ssh into your domU now: 'ssh stack@$DOMU_IP' using your password"
+    echo "ssh into your domU now: 'ssh stack@$OS_VM_MANAGEMENT_ADDRESS' using your password"
     echo "and then do: 'tail -f /opt/stack/run.sh.log'"
     echo ""
     echo "When the script completes, you can then visit the OpenStack Dashboard"
-    echo "at http://$DOMU_IP, and contact other services at the usual ports."
+    echo "at http://$OS_VM_SERVICES_ADDRESS, and contact other services at the usual ports."
 fi
diff --git a/tools/xen/mocks b/tools/xen/mocks
index b006558..6da6acb 100644
--- a/tools/xen/mocks
+++ b/tools/xen/mocks
@@ -12,6 +12,18 @@
     exit 1
 }
 
+test ! -e "$XE_RESPONSE" && {
+    echo "Mocking is not set up properly."
+    echo "XE_RESPONSE should point to an existing file."
+    exit 1
+}
+
+test ! -e "$XE_CALLS" && {
+    echo "Mocking is not set up properly."
+    echo "XE_CALLS should point to an existing file."
+    exit 1
+}
+
 function mktemp {
     if test "${1:-}" = "-d";
     then
@@ -41,6 +53,10 @@
     echo "rm $@" >> $LIST_OF_ACTIONS
 }
 
+function ln {
+    echo "ln $@" >> $LIST_OF_ACTIONS
+}
+
 function [ {
     if test "${1:-}" = "-d";
     then
@@ -57,3 +73,13 @@
     echo "Mock test does not implement the requested function"
     exit 1
 }
+
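+# Mocked xe: print the canned response from $XE_RESPONSE and record every
+# argument of the call (one per line) into $XE_CALLS for later assertions.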
+function xe {
+    cat $XE_RESPONSE
+    {
+    for i in $(seq "$#")
+    do
+        eval "echo \"\$$i\""
+    done
+    } >> $XE_CALLS
+}
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index fe52445..0e11226 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -10,54 +10,51 @@
 # creating the user called "stack",
 # and shuts down the VM to signal the script has completed
 
-set -x
-# Echo commands
+set -o errexit
+set -o nounset
 set -o xtrace
 
 # Configurable nuggets
-GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
-STAGING_DIR=${STAGING_DIR:-stage}
-DO_TGZ=${DO_TGZ:-1}
-XS_TOOLS_PATH=${XS_TOOLS_PATH:-"/root/xs-tools.deb"}
-STACK_USER=${STACK_USER:-stack}
+GUEST_PASSWORD="$1"
+XS_TOOLS_PATH="$2"
+STACK_USER="$3"
 
 # Install basics
-chroot $STAGING_DIR apt-get update
-chroot $STAGING_DIR apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
-chroot $STAGING_DIR apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo
-chroot $STAGING_DIR pip install xenapi
+apt-get update
+apt-get install -y cracklib-runtime curl wget ssh openssh-server tcpdump ethtool
+apt-get install -y curl wget ssh openssh-server python-pip git vim-nox sudo
+pip install xenapi
 
 # Install XenServer guest utilities
-cp $XS_TOOLS_PATH ${STAGING_DIR}${XS_TOOLS_PATH}
-chroot $STAGING_DIR dpkg -i $XS_TOOLS_PATH
-chroot $STAGING_DIR update-rc.d -f xe-linux-distribution remove
-chroot $STAGING_DIR update-rc.d xe-linux-distribution defaults
+dpkg -i $XS_TOOLS_PATH
+update-rc.d -f xe-linux-distribution remove
+update-rc.d xe-linux-distribution defaults
 
 # Make a small cracklib dictionary, so that passwd still works, but we don't
 # have the big dictionary.
-mkdir -p $STAGING_DIR/usr/share/cracklib
-echo a | chroot $STAGING_DIR cracklib-packer
+mkdir -p /usr/share/cracklib
+echo a | cracklib-packer
 
 # Make /etc/shadow, and set the root password
-chroot $STAGING_DIR "pwconv"
-echo "root:$GUEST_PASSWORD" | chroot $STAGING_DIR chpasswd
+pwconv
+echo "root:$GUEST_PASSWORD" | chpasswd
 
 # Put the VPX into UTC.
-rm -f $STAGING_DIR/etc/localtime
+rm -f /etc/localtime
 
 # Add stack user
-chroot $STAGING_DIR groupadd libvirtd
-chroot $STAGING_DIR useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
-echo $STACK_USER:$GUEST_PASSWORD | chroot $STAGING_DIR chpasswd
-echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> $STAGING_DIR/etc/sudoers
+groupadd libvirtd
+useradd $STACK_USER -s /bin/bash -d /opt/stack -G libvirtd
+echo $STACK_USER:$GUEST_PASSWORD | chpasswd
+echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
 
 # Give ownership of /opt/stack to stack user
-chroot $STAGING_DIR chown -R $STACK_USER /opt/stack
+chown -R $STACK_USER /opt/stack
 
 # Make our ip address hostnames look nice at the command prompt
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/opt/stack/.bashrc
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/root/.bashrc
-echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> $STAGING_DIR/etc/profile
+echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /opt/stack/.bashrc
+echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /root/.bashrc
+echo "export PS1='${debian_chroot:+($debian_chroot)}\\u@\\H:\\w\\$ '" >> /etc/profile
 
 function setup_vimrc {
     if [ ! -e $1 ]; then
@@ -72,20 +69,15 @@
 }
 
 # Setup simple .vimrcs
-setup_vimrc $STAGING_DIR/root/.vimrc
-setup_vimrc $STAGING_DIR/opt/stack/.vimrc
-
-if [ "$DO_TGZ" = "1" ]; then
-    # Compress
-    rm -f stage.tgz
-    tar cfz stage.tgz stage
-fi
+setup_vimrc /root/.vimrc
+setup_vimrc /opt/stack/.vimrc
 
 # remove self from local.rc
 # so this script is not run again
 rm -rf /etc/rc.local
-mv /etc/rc.local.preparebackup /etc/rc.local
-cp $STAGING_DIR/etc/rc.local $STAGING_DIR/etc/rc.local.backup
+
+# Restore rc.local file
+cp /etc/rc.local.preparebackup /etc/rc.local
 
 # shutdown to notify we are done
 shutdown -h now
diff --git a/tools/xen/prepare_guest_template.sh b/tools/xen/prepare_guest_template.sh
index 19bd2f8..6ea6f63 100755
--- a/tools/xen/prepare_guest_template.sh
+++ b/tools/xen/prepare_guest_template.sh
@@ -15,9 +15,8 @@
 # The resultant image is started by install_os_domU.sh,
 # and once the VM has shutdown, build_xva.sh is run
 
-# Exit on errors
 set -o errexit
-# Echo commands
+set -o nounset
 set -o xtrace
 
 # This directory
@@ -75,7 +74,8 @@
 
 # run prepare_guest.sh on boot
 cat <<EOF >$STAGING_DIR/etc/rc.local
-GUEST_PASSWORD=$GUEST_PASSWORD STAGING_DIR=/ \
-    DO_TGZ=0 XS_TOOLS_PATH=$XS_TOOLS_PATH \
-    bash /opt/stack/prepare_guest.sh > /opt/stack/prepare_guest.log 2>&1
+#!/bin/sh -e
+bash /opt/stack/prepare_guest.sh \\
+    "$GUEST_PASSWORD" "$XS_TOOLS_PATH" "$STACK_USER" \\
+    > /opt/stack/prepare_guest.log 2>&1
 EOF
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 241296b..c82f870 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -25,7 +25,6 @@
 DATA_VDI_SIZE="500MiB"
 BRIDGE_M=
 BRIDGE_P=
-KERNEL_PARAMS=
 VPX_FILE=os-vpx.xva
 AS_TEMPLATE=
 FROM_TEMPLATE=
@@ -38,7 +37,7 @@
 cat << EOF
 
   Usage: $0 [-f FILE_PATH] [-d DISK_SIZE] [-v BRIDGE_NAME] [-m BRIDGE_NAME] [-p BRIDGE_NAME]
-            [-k PARAMS] [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
+            [-r RAM] [-i|-c] [-w] [-b] [-l NAME_LABEL] [-t TEMPLATE_NW_INSTALL]
 
   Installs XenServer OpenStack VPX.
 
@@ -57,7 +56,6 @@
                   Defaults to xenbr0.
      -v bridge    Specifies the bridge for the vm network
      -p bridge    Specifies the bridge for the externally facing network.
-     -k params    Specifies kernel parameters.
      -r MiB       Specifies RAM used by the VPX, in MiB.
                   By default it will take the value from the XVA.
      -l name      Specifies the name label for the VM.
@@ -81,15 +79,12 @@
      using the default for management traffic:
             install-os-vpx.sh -m xapi4
 
-     Create a VPX that automatically becomes the master:
-            install-os-vpx.sh -k geppetto_master=true
-
 EOF
 }
 
 get_params()
 {
-  while getopts "hicwbf:d:v:m:p:k:r:l:t:" OPTION;
+  while getopts "hicwbf:d:v:m:p:r:l:t:" OPTION;
   do
     case $OPTION in
       h) usage
@@ -119,9 +114,6 @@
       p)
          BRIDGE_P=$OPTARG
          ;;
-      k)
-         KERNEL_PARAMS=$OPTARG
-         ;;
       r)
          RAM=$OPTARG
          ;;
@@ -235,7 +227,7 @@
 create_vm_vif()
 {
   local v="$1"
-  echo "Installing management interface on $BRIDGE_V."
+  echo "Installing VM interface on $BRIDGE_V."
   local out_network_uuid=$(find_network "$BRIDGE_V")
   create_vif "$v" "$out_network_uuid" "1" >/dev/null
 }
@@ -328,20 +320,6 @@
 }
 
 
-set_kernel_params()
-{
-  local v="$1"
-  local args=$KERNEL_PARAMS
-  if [ "$args" != "" ]
-  then
-    echo "Passing Geppetto args to VPX: $args."
-    pvargs=$(xe vm-param-get param-name=PV-args uuid="$v")
-    args="$pvargs $args"
-    xe vm-param-set PV-args="$args" uuid="$v"
-  fi
-}
-
-
 set_memory()
 {
   local v="$1"
@@ -367,7 +345,6 @@
 set_all()
 {
   local v="$1"
-  set_kernel_params "$v"
   set_memory "$v"
   set_auto_start "$v"
   label_system_disk "$v"
@@ -430,7 +407,6 @@
   create_vm_vif "$vm_uuid"
   create_management_vif "$vm_uuid"
   create_public_vif "$vm_uuid"
-  set_kernel_params "$vm_uuid"
   xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
   xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
   set_memory "$vm_uuid"
diff --git a/tools/xen/scripts/install_ubuntu_template.sh b/tools/xen/scripts/install_ubuntu_template.sh
index 43b6dec..5cbe2ac 100755
--- a/tools/xen/scripts/install_ubuntu_template.sh
+++ b/tools/xen/scripts/install_ubuntu_template.sh
@@ -7,9 +7,8 @@
 # Based on a script by: David Markey <david.markey@citrix.com>
 #
 
-# Exit on errors
 set -o errexit
-# Echo commands
+set -o nounset
 set -o xtrace
 
 # This directory
@@ -38,7 +37,7 @@
 builtin_name="Debian Squeeze 6.0 (32-bit)"
 builtin_uuid=$(xe template-list name-label="$builtin_name" --minimal)
 if [[ -z $builtin_uuid ]]; then
-    echo "Cant find the Debian Squeeze 32bit template on your XenServer."
+    echo "Can't find the Debian Squeeze 32bit template on your XenServer."
     exit 1
 fi
 
@@ -54,11 +53,11 @@
 pvargs="-- quiet console=hvc0 partman/default_filesystem=ext3 \
 console-setup/ask_detect=false locale=${UBUNTU_INST_LOCALE} \
 keyboard-configuration/layoutcode=${UBUNTU_INST_KEYBOARD} \
-netcfg/choose_interface=${HOST_IP_IFACE} \
+netcfg/choose_interface=${UBUNTU_INST_IFACE} \
 netcfg/get_hostname=os netcfg/get_domain=os auto \
 url=${preseed_url}"
 
-if [ "$NETINSTALLIP" != "dhcp" ]; then
+if [ "$UBUNTU_INST_IP" != "dhcp" ]; then
     netcfgargs="netcfg/disable_autoconfig=true \
 netcfg/get_nameservers=${UBUNTU_INST_NAMESERVERS} \
 netcfg/get_ipaddress=${UBUNTU_INST_IP} \
@@ -70,11 +69,16 @@
 
 xe template-param-set uuid=$new_uuid \
     other-config:install-methods=http \
-    other-config:install-repository="$UBUNTU_INST_REPOSITORY" \
+    other-config:install-repository="http://${UBUNTU_INST_HTTP_HOSTNAME}${UBUNTU_INST_HTTP_DIRECTORY}" \
     PV-args="$pvargs" \
     other-config:debian-release="$UBUNTU_INST_RELEASE" \
     other-config:default_template=true \
     other-config:disks='<provision><disk device="0" size="'$disk_size'" sr="" bootable="true" type="system"/></provision>' \
     other-config:install-arch="$UBUNTU_INST_ARCH"
 
+if ! [ -z "$UBUNTU_INST_HTTP_PROXY" ]; then
+    xe template-param-set uuid=$new_uuid \
+        other-config:install-proxy="$UBUNTU_INST_HTTP_PROXY"
+fi
+
 echo "Ubuntu template installed uuid:$new_uuid"
diff --git a/tools/xen/templates/interfaces.in b/tools/xen/templates/interfaces.in
deleted file mode 100644
index 74b41cc..0000000
--- a/tools/xen/templates/interfaces.in
+++ /dev/null
@@ -1,23 +0,0 @@
-auto lo
-iface lo inet loopback
-
-# If eth3 is static, the order should not matter
-# and eth0 will have the default gateway. If not,
-# we probably want the default gateway to be
-# what is on the public interface. Hence changed
-# the order here.
-auto eth3
-iface eth3 inet static
-        address @ETH3_IP@
-        netmask @ETH3_NETMASK@
-
-auto eth1
-iface eth1 inet static
-        address @ETH1_IP@
-        netmask @ETH1_NETMASK@
-post-up ethtool -K eth1 tx off
-
-auto eth2
-iface eth2 inet static
-        address @ETH2_IP@
-        netmask @ETH2_NETMASK@
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 6817ec3..410df5f 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -23,15 +23,27 @@
 
     LIST_OF_ACTIONS=$(mktemp)
     truncate -s 0 $LIST_OF_ACTIONS
+
+    XE_RESPONSE=$(mktemp)
+    truncate -s 0 $XE_RESPONSE
+
+    XE_CALLS=$(mktemp)
+    truncate -s 0 $XE_CALLS
 }
 
 # Teardown
 function after_each_test {
     rm -f $LIST_OF_DIRECTORIES
     rm -f $LIST_OF_ACTIONS
+    rm -f $XE_RESPONSE
+    rm -f $XE_CALLS
 }
 
 # Helpers
+function setup_xe_response {
+    echo "$1" > $XE_RESPONSE
+}
+
 function given_directory_exists {
     echo "$1" >> $LIST_OF_DIRECTORIES
 }
@@ -44,6 +56,30 @@
     [ "$?" != "0" ] || exit 1
 }
 
+function assert_xe_min {
+    grep -qe "^--minimal\$" $XE_CALLS
+}
+
+function assert_xe_param {
+    # Check that every expected parameter was passed to the mocked xe call
+    local expected
+    for expected in "$@"; do
+        grep -qe "^$expected\$" $XE_CALLS || return 1
+    done
+}
+
+function mock_out {
+    local FNNAME="$1"
+    local OUTPUT="$2"
+
+    . <(cat << EOF
+function $FNNAME {
+    echo "$OUTPUT"
+}
+EOF
+)
+}
+
+function assert_symlink {
+    grep -qe "^ln -s $2 $1\$" $LIST_OF_ACTIONS
+}
+
 # Tests
 function test_plugin_directory_on_xenserver {
     given_directory_exists "/etc/xapi.d/plugins/"
@@ -80,9 +116,49 @@
 }
 
 function test_create_directory_for_kernels {
-    (. mocks && create_directory_for_kernels)
+    (
+        . mocks
+        mock_out get_local_sr uuid1
+        create_directory_for_kernels
+    )
 
-    assert_directory_exists "/boot/guest"
+    assert_directory_exists "/var/run/sr-mount/uuid1/os-guest-kernels"
+    assert_symlink "/boot/guest" "/var/run/sr-mount/uuid1/os-guest-kernels"
+}
+
+function test_create_directory_for_kernels_existing_dir {
+    (
+        . mocks
+        given_directory_exists "/boot/guest"
+        create_directory_for_kernels
+    )
+
+    diff -u $LIST_OF_ACTIONS - << EOF
+[ -d /boot/guest ]
+EOF
+}
+
+function test_create_directory_for_images {
+    (
+        . mocks
+        mock_out get_local_sr uuid1
+        create_directory_for_images
+    )
+
+    assert_directory_exists "/var/run/sr-mount/uuid1/os-images"
+    assert_symlink "/images" "/var/run/sr-mount/uuid1/os-images"
+}
+
+function test_create_directory_for_images_existing_dir {
+    (
+        . mocks
+        given_directory_exists "/images"
+        create_directory_for_images
+    )
+
+    diff -u $LIST_OF_ACTIONS - << EOF
+[ -d /images ]
+EOF
 }
 
 function test_extract_remote_zipball {
@@ -107,6 +183,23 @@
     rm -rf $tmpdir
 }
 
+function test_get_local_sr {
+    setup_xe_response "uuid123"
+
+    local RESULT=$(. mocks && get_local_sr)
+
+    [ "$RESULT" == "uuid123" ]
+
+    assert_xe_min
+    assert_xe_param "sr-list" "name-label=Local storage"
+}
+
+function test_get_local_sr_path {
+    local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
+
+    [ "/var/run/sr-mount/uuid1" == "$RESULT" ]
+}
+
 # Test runner
 [ "$1" = "" ] && {
     grep -e "^function *test_" $0 | cut -d" " -f2
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index e4d8ac9..c343891 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -8,51 +8,56 @@
 # Name of this guest
 GUEST_NAME=${GUEST_NAME:-DevStackOSDomU}
 
+# Template cleanup
+CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false}
+
 # Size of image
 VDI_MB=${VDI_MB:-5000}
 OSDOMU_MEM_MB=1024
 OSDOMU_VDI_GB=8
 
+# Network mapping. Specify bridge names or network names. Network names may
+# differ across localised versions of XenServer. If a given bridge/network
+# is not found, a new network will be created with the specified name.
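+#
+# To see which name-labels and bridges already exist on the hypervisor, one
+# way (run from dom0) is:
+#   xe network-list params=name-label,bridge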
+
+# The management network is specified by the bridge name. xenbr0 is usually
+# the name of the bridge of the network associated with the hypervisor's eth0.
+MGT_BRIDGE_OR_NET_NAME="xenbr0"
+VM_BRIDGE_OR_NET_NAME="OpenStack VM Network"
+PUB_BRIDGE_OR_NET_NAME="OpenStack Public Network"
+XEN_INT_BRIDGE_OR_NET_NAME="OpenStack VM Integration Network"
+
 # VM Password
 GUEST_PASSWORD=${GUEST_PASSWORD:-secrete}
 
+# Extracted variables for OpenStack VM network device numbers.
+# Make sure they form a continuous sequence starting from 0.
+MGT_DEV_NR=0
+VM_DEV_NR=1
+PUB_DEV_NR=2
+XEN_INT_DEV_NR=3
+
 # Host Interface, i.e. the interface on the nova vm you want to expose the
-# services on. Usually eth2 (management network) or eth3 (public network) and
-# not eth0 (private network with XenServer host) or eth1 (VM traffic network)
-# This is also used as the interface for the Ubuntu install
-HOST_IP_IFACE=${HOST_IP_IFACE:-eth3}
+# services on. Usually the device connected to the management network or the
+# one connected to the public network is used.
+HOST_IP_IFACE=${HOST_IP_IFACE:-"eth${MGT_DEV_NR}"}
 
 #
 # Our nova host's network info
 #
 
-# A host-only ip that let's the interface come up, otherwise unused
+# Management network
+MGT_IP=${MGT_IP:-dhcp}
+MGT_NETMASK=${MGT_NETMASK:-ignored}
+
+# VM Network
 VM_IP=${VM_IP:-10.255.255.255}
-MGT_IP=${MGT_IP:-172.16.100.55}
-PUB_IP=${PUB_IP:-192.168.1.55}
+VM_NETMASK=${VM_NETMASK:-255.255.255.0}
 
 # Public network
+# Aligned with stack.sh - see FLOATING_RANGE
+PUB_IP=${PUB_IP:-172.24.4.10}
 PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
-PUB_BR=${PUB_BR:-"xenbr0"}
-PUB_VLAN=${PUB_VLAN:--1}
-PUB_DEV=${PUB_DEV:-eth0}
-
-# VM network params
-VM_NETMASK=${VM_NETMASK:-255.255.255.0}
-VM_BR=${VM_BR:-""}
-VM_VLAN=${VM_VLAN:-100}
-VM_DEV=${VM_DEV:-eth0}
-
-# MGMT network params
-MGT_NETMASK=${MGT_NETMASK:-255.255.255.0}
-MGT_BR=${MGT_BR:-""}
-MGT_VLAN=${MGT_VLAN:-101}
-MGT_DEV=${MGT_DEV:-eth0}
-
-# Decide if you should enable eth0,
-# the guest installer network
-# You need to disable this on xcp-xapi on Ubuntu 12.04
-ENABLE_GI=true
 
 # Ubuntu install settings
 UBUNTU_INST_RELEASE="oneiric"
@@ -62,15 +67,29 @@
 # XenServer 6.1 and later or XCP 1.6 or later
 # 11.10 is only really supported with XenServer 6.0.2 and later
 UBUNTU_INST_ARCH="amd64"
-UBUNTU_INST_REPOSITORY="http://archive.ubuntu.net/ubuntu"
+UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.net"
+UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
+UBUNTU_INST_HTTP_PROXY=""
 UBUNTU_INST_LOCALE="en_US"
 UBUNTU_INST_KEYBOARD="us"
-# network configuration for HOST_IP_IFACE during install
+# Network configuration for the Ubuntu netinstall.
+# TODO(matelakat): get rid of legacy network interfaces
+# specify "eth2" to use the management network
+# specify "eth3" to use the public network
+UBUNTU_INST_IFACE="eth2"
 UBUNTU_INST_IP="dhcp"
 UBUNTU_INST_NAMESERVERS=""
 UBUNTU_INST_NETMASK=""
 UBUNTU_INST_GATEWAY=""
 
-# Load stackrc defaults
-# then override with settings from localrc
-cd ../.. && source ./stackrc && cd $TOP_DIR
+# Create a separate xvdb. This could be used as a backing device for cinder
+# volumes. Specify
+#   XEN_XVDB_SIZE_GB=10
+#   VOLUME_BACKING_DEVICE=/dev/xvdb
+# in your localrc to avoid kernel lockups:
+#   https://bugs.launchpad.net/cinder/+bug/1023755
+#
+# Set the size to 0 to avoid creating an additional disk.
+XEN_XVDB_SIZE_GB=0
+
+source ../../stackrc
diff --git a/unstack.sh b/unstack.sh
index 3ac2985..d1d0349 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -109,4 +109,5 @@
 if is_service_enabled quantum; then
     stop_quantum
     stop_quantum_third_party
+    cleanup_quantum
 fi