Merge "Makes error message easier to understand."
diff --git a/README.md b/README.md
index 9914b1e..89e3855 100644
--- a/README.md
+++ b/README.md
@@ -73,7 +73,7 @@
 This is a recent change (Oct 2013) from the previous behaviour of
 automatically creating a ``stack`` user.  Automatically creating
 user accounts is not the right response to running as root, so
-that bit is now an explicit step using ``tools/create-stack-user.sh``. 
+that bit is now an explicit step using ``tools/create-stack-user.sh``.
 Run that (as root!) or just check it out to see what DevStack's
 expectations are for the account it runs under.  Many people simply
 use their usual login (the default 'ubuntu' login on a UEC image
@@ -163,7 +163,7 @@
 Basic Setup
 
 In order to enable Neutron in a single node setup, you'll need the
-following settings in your `localrc` section:
+following settings in your `local.conf`:
 
     disable_service n-net
     enable_service q-svc
@@ -172,7 +172,6 @@
     enable_service q-l3
     enable_service q-meta
     enable_service q-metering
-    enable_service neutron
     # Optional, to enable tempest configuration as part of DevStack
     enable_service tempest
 
@@ -180,24 +179,44 @@
 
 DevStack supports setting specific Neutron configuration flags to the
 service, Open vSwitch plugin and LinuxBridge plugin configuration files.
-To make use of this feature, the following variables are defined and can
-be configured in your `localrc` section:
+To make use of this feature, the settings can be added to ``local.conf``.
+The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed
+in the near future.  The ``local.conf`` headers for the replacements are:
 
-    Variable Name             Config File  Section Modified
-    -------------------------------------------------------------------------------------
-    Q_SRV_EXTRA_OPTS          Plugin       `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge)
-    Q_AGENT_EXTRA_AGENT_OPTS  Plugin       AGENT
-    Q_AGENT_EXTRA_SRV_OPTS    Plugin       `OVS` (for Open Vswitch) or `LINUX_BRIDGE` (for LinuxBridge)
-    Q_SRV_EXTRA_DEFAULT_OPTS  Service      DEFAULT
+* ``Q_SRV_EXTRA_OPTS``:
 
-An example of using the variables in your `localrc` section is below:
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [linuxbridge]   # or [ovs]
 
-    Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472)
-    Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan)
+* ``Q_AGENT_EXTRA_AGENT_OPTS``:
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [agent]
+
+* ``Q_AGENT_EXTRA_SRV_OPTS``:
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [linuxbridge]   # or [ovs]
+
+* ``Q_SRV_EXTRA_DEFAULT_OPTS``:
+
+    [[post-config|$NEUTRON_CONF]]
+    [DEFAULT]
+
+Example extra config in `local.conf`:
+
+    [[post-config|/$Q_PLUGIN_CONF_FILE]]
+    [agent]
+    tunnel_type=vxlan
+    vxlan_udp_port=8472
+
+    [[post-config|$NEUTRON_CONF]]
+    [DEFAULT]
+    tenant_network_type=vxlan
 
 DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin
-can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A
-simple way to configure the ml2 plugin is shown below:
+can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This
+is a simple way to configure the ML2 plugin:
 
     # VLAN configuration
     Q_PLUGIN=ml2
@@ -223,7 +242,6 @@
     Q_ML2_PLUGIN_GRE_TYPE_OPTIONS    GRE TypeDriver options. Defaults to none.
     Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS  VXLAN TypeDriver options. Defaults to none.
     Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS   VLAN TypeDriver options. Defaults to none.
-    Q_AGENT_EXTRA_AGENT_OPTS         Extra configuration options to pass to the OVS or LinuxBridge Agent.
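A minimal sketch of how these type driver options might look in the
``localrc`` section of `local.conf` (both the values and the exact assignment
form shown here are only illustrative):

    Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS="vni_ranges=1001:2000"
    Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS="network_vlan_ranges=physnet1:100:200"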
 
 # Heat
 
@@ -253,10 +271,6 @@
 
 If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`.
 
-# DevStack on Docker
-
-If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`.
-
 # Additional Projects
 
 DevStack has a hook mechanism to call out to a dispatch script at specific
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index d2c636f..7726e7e 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -19,6 +19,8 @@
 #
 #     SCREEN_LOGDIR=/opt/stack/screen-logs
 
+set -o pipefail
+
 CERT_DIR=$(cd $(dirname "$0") && pwd)
 TOP_DIR=$(cd $CERT_DIR/..; pwd)
 
diff --git a/eucarc b/eucarc
index 3502351..343f4cc 100644
--- a/eucarc
+++ b/eucarc
@@ -22,7 +22,7 @@
 export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }')
 
 # Create EC2 credentials for the current user
-CREDS=$(keystone ec2-credentials-create)
+CREDS=$(openstack ec2 credentials create)
 export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
 export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
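For reference, the awk filters above select the fourth whitespace-separated
field of the client's table output; a minimal sketch of that parsing, using a
made-up table row:

    # hypothetical row as printed by the openstack client
    echo "| access | 0123456789abcdef |" | awk '/ access / { print $4 }'
    # prints: 0123456789abcdef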
 
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index f679669..d756685 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,8 +44,8 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
-# Also skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+# Ironic does not support boot from volume.
+[ "$VIRT_DRIVER" == "ironic" ] && exit 55
 
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
diff --git a/exercises/euca.sh b/exercises/euca.sh
index ad852a4..f9c4752 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -36,13 +36,13 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# Import project functions
+source $TOP_DIR/lib/neutron
+
 # If nova api is not enabled we exit with exitcode 55 so that
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -85,7 +85,7 @@
 
 # Volumes
 # -------
-if is_service_enabled c-vol && ! is_service_enabled n-cell; then
+if is_service_enabled c-vol && ! is_service_enabled n-cell && [ "$VIRT_DRIVER" != "ironic" ]; then
     VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
     die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8b7b961..7e90e5a 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -40,9 +40,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -183,7 +180,7 @@
 fi
 
 # FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
+if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
     # Test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
     ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
 fi
diff --git a/exercises/savanna.sh b/exercises/sahara.sh
similarity index 88%
rename from exercises/savanna.sh
rename to exercises/sahara.sh
index fc3f976..867920e 100755
--- a/exercises/savanna.sh
+++ b/exercises/sahara.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
-# **savanna.sh**
+# **sahara.sh**
 
-# Sanity check that Savanna started if enabled
+# Sanity check that Sahara started if enabled
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -33,9 +33,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
-is_service_enabled savanna || exit 55
+is_service_enabled sahara || exit 55
 
-curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!"
+curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index d71a1e0..5f8b0a4 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -37,9 +37,6 @@
 # the exercise is skipped
 is_service_enabled n-api || exit 55
 
-# Skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
-
 
 # Testing Security Groups
 # =======================
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 83d25c7..1dff6a4 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -41,8 +41,8 @@
 # exercise is skipped.
 is_service_enabled cinder || exit 55
 
-# Also skip if the hypervisor is Docker
-[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+# Ironic does not currently support volume attachment.
+[ "$VIRT_DRIVER" == "ironic" ] && exit 55
 
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh
index 9e61dc5..3b8e3d5 100644
--- a/extras.d/50-ironic.sh
+++ b/extras.d/50-ironic.sh
@@ -24,10 +24,17 @@
         # Start the ironic API and ironic taskmgr components
         echo_summary "Starting Ironic"
         start_ironic
+
+        if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then
+            prepare_baremetal_basic_ops
+        fi
     fi
 
     if [[ "$1" == "unstack" ]]; then
         stop_ironic
+        if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then
+            cleanup_baremetal_basic_ops
+        fi
     fi
 
     if [[ "$1" == "clean" ]]; then
diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh
new file mode 100644
index 0000000..80e07ff
--- /dev/null
+++ b/extras.d/70-sahara.sh
@@ -0,0 +1,37 @@
+# sahara.sh - DevStack extras script to install Sahara
+
+if is_service_enabled sahara; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/sahara
+        source $TOP_DIR/lib/sahara-dashboard
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing sahara"
+        install_sahara
+        cleanup_sahara
+        if is_service_enabled horizon; then
+            install_sahara_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        echo_summary "Configuring sahara"
+        configure_sahara
+        create_sahara_accounts
+        if is_service_enabled horizon; then
+            configure_sahara_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing sahara"
+        start_sahara
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_sahara
+        if is_service_enabled horizon; then
+            cleanup_sahara_dashboard
+        fi
+    fi
+
+    if [[ "$1" == "clean" ]]; then
+        cleanup_sahara
+    fi
+fi
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
deleted file mode 100644
index edc1376..0000000
--- a/extras.d/70-savanna.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-# savanna.sh - DevStack extras script to install Savanna
-
-if is_service_enabled savanna; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/savanna
-        source $TOP_DIR/lib/savanna-dashboard
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Savanna"
-        install_savanna
-        cleanup_savanna
-        if is_service_enabled horizon; then
-            install_savanna_dashboard
-        fi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Savanna"
-        configure_savanna
-        create_savanna_accounts
-        if is_service_enabled horizon; then
-            configure_savanna_dashboard
-        fi
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        echo_summary "Initializing Savanna"
-        start_savanna
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_savanna
-        if is_service_enabled horizon; then
-            cleanup_savanna_dashboard
-        fi
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_savanna
-    fi
-fi
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 0186e36..74f4c60 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -9,7 +9,7 @@
         install_tempest
     elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
         # Tempest config must come after layer 2 services are running
-        :
+        create_tempest_accounts
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index 71007ba..f1b692a 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,5 +1,5 @@
-python-pymongo
-mongodb-server
+python-pymongo # NOPRIME
+mongodb-server # NOPRIME
 libnspr4-dev
 pkg-config
 libxml2-dev
diff --git a/files/apts/cinder b/files/apts/cinder
index 712fee9..7819c31 100644
--- a/files/apts/cinder
+++ b/files/apts/cinder
@@ -2,6 +2,5 @@
 lvm2
 qemu-utils
 libpq-dev
-python-dev
 open-iscsi
 open-iscsi-utils # Deprecated since quantal dist:precise
diff --git a/files/apts/general b/files/apts/general
index 995c0c6..020d84f 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -18,5 +18,6 @@
 euca2ools # only for testing client
 tar
 python-cmd2 # dist:precise
+python-dev
 python2.7
 bc
diff --git a/files/apts/glance b/files/apts/glance
index 6dc878e..e80f447 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -5,7 +5,6 @@
 libssl-dev          # testonly
 libxml2-dev
 libxslt1-dev        # testonly
-python-dev
 python-eventlet
 python-routes
 python-greenlet
diff --git a/files/apts/ironic b/files/apts/ironic
new file mode 100644
index 0000000..a749ad7
--- /dev/null
+++ b/files/apts/ironic
@@ -0,0 +1,10 @@
+libguestfs0
+libvirt-bin
+openssh-client
+openvswitch-switch
+openvswitch-datapath-dkms
+python-libguestfs
+python-libvirt
+syslinux
+tftpd-hpa
+xinetd
diff --git a/files/apts/keystone b/files/apts/keystone
index 564921b..57fde80 100644
--- a/files/apts/keystone
+++ b/files/apts/keystone
@@ -1,4 +1,3 @@
-python-dev
 python-lxml
 python-pastescript
 python-pastedeploy
diff --git a/files/apts/nova b/files/apts/nova
index ae925c3..69d0a35 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -12,7 +12,7 @@
 ebtables
 sqlite3
 sudo
-kvm # NOPRIME
+qemu-kvm # NOPRIME
 qemu # dist:wheezy,jessie NOPRIME
 libvirt-bin # NOPRIME
 libjs-jquery-tablesorter # Needed for coverage html reports
@@ -25,7 +25,6 @@
 python-mox
 python-paste
 python-migrate
-python-gflags
 python-greenlet
 python-libvirt # NOPRIME
 python-libxml2
@@ -34,7 +33,6 @@
 python-pastedeploy
 python-eventlet
 python-cheetah
-python-carrot
 python-tempita
 python-sqlalchemy
 python-suds
diff --git a/files/apts/ryu b/files/apts/ryu
index e8ed926..9b85080 100644
--- a/files/apts/ryu
+++ b/files/apts/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/apts/swift b/files/apts/swift
index 37d5bc0..0c27b5b 100644
--- a/files/apts/swift
+++ b/files/apts/swift
@@ -4,7 +4,6 @@
 memcached
 python-configobj
 python-coverage
-python-dev
 python-eventlet
 python-greenlet
 python-netifaces
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
deleted file mode 100755
index fc1e813..0000000
--- a/files/keystone_data.sh
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/bash
-#
-# Initial data for Keystone using python-keystoneclient
-#
-# Tenant               User         Roles
-# ------------------------------------------------------------------
-# service              glance       service
-# service              glance-swift ResellerAdmin
-# service              heat         service        # if enabled
-# service              ceilometer   admin          # if enabled
-# Tempest Only:
-# alt_demo             alt_demo     Member
-#
-# Variables set before calling this script:
-# SERVICE_TOKEN - aka admin_token in keystone.conf
-# SERVICE_ENDPOINT - local Keystone admin endpoint
-# SERVICE_TENANT_NAME - name of tenant containing service accounts
-# SERVICE_HOST - host used for endpoint creation
-# ENABLED_SERVICES - stack.sh's list of services to start
-# DEVSTACK_DIR - Top-level DevStack directory
-# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation
-
-# Defaults
-# --------
-
-ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
-SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}
-export SERVICE_TOKEN=$SERVICE_TOKEN
-export SERVICE_ENDPOINT=$SERVICE_ENDPOINT
-SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
-
-# Roles
-# -----
-
-# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
-# The admin role in swift allows a user to act as an admin for their tenant,
-# but ResellerAdmin is needed for a user to act as any tenant. The name of this
-# role is also configurable in swift-proxy.conf
-keystone role-create --name=ResellerAdmin
-# Service role, so service users do not have to be admins
-keystone role-create --name=service
-
-
-# Services
-# --------
-
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
-    # Nova needs ResellerAdmin role to download images when accessing
-    # swift through the s3 api.
-    keystone user-role-add \
-        --tenant $SERVICE_TENANT_NAME \
-        --user nova \
-        --role ResellerAdmin
-fi
-
-# Glance
-if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    keystone user-create \
-        --name=glance \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant $SERVICE_TENANT_NAME \
-        --email=glance@example.com
-    keystone user-role-add \
-        --tenant $SERVICE_TENANT_NAME \
-        --user glance \
-        --role service
-    # required for swift access
-    if [[ "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
-        keystone user-create \
-            --name=glance-swift \
-            --pass="$SERVICE_PASSWORD" \
-            --tenant $SERVICE_TENANT_NAME \
-            --email=glance-swift@example.com
-        keystone user-role-add \
-            --tenant $SERVICE_TENANT_NAME \
-            --user glance-swift \
-            --role ResellerAdmin
-    fi
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=glance \
-            --type=image \
-            --description="Glance Image Service"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service glance \
-            --publicurl "http://$SERVICE_HOST:9292" \
-            --adminurl "http://$SERVICE_HOST:9292" \
-            --internalurl "http://$SERVICE_HOST:9292"
-    fi
-fi
-
-# Ceilometer
-if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
-    # Ceilometer needs ResellerAdmin role to access swift account stats.
-    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
-        --user ceilometer \
-        --role ResellerAdmin
-fi
-
-# EC2
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=ec2 \
-            --type=ec2 \
-            --description="EC2 Compatibility Layer"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service ec2 \
-            --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
-            --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
-            --internalurl "http://$SERVICE_HOST:8773/services/Cloud"
-    fi
-fi
-
-# S3
-if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        keystone service-create \
-            --name=s3 \
-            --type=s3 \
-            --description="S3"
-        keystone endpoint-create \
-            --region RegionOne \
-            --service s3 \
-            --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
-            --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
-            --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT"
-    fi
-fi
-
-if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then
-    # Tempest has some tests that validate various authorization checks
-    # between two regular users in separate tenants
-    keystone tenant-create \
-        --name=alt_demo
-    keystone user-create \
-        --name=alt_demo \
-        --pass="$ADMIN_PASSWORD" \
-        --email=alt_demo@example.com
-    keystone user-role-add \
-        --tenant alt_demo \
-        --user alt_demo \
-        --role Member
-fi
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
index dd68ac0..d9844e9 100644
--- a/files/rpms-suse/glance
+++ b/files/rpms-suse/glance
@@ -8,5 +8,6 @@
 python-eventlet
 python-greenlet
 python-iso8601
+python-pyOpenSSL
 python-wsgiref
 python-xattr
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index e9ccf59..462513d 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -3,7 +3,7 @@
 ebtables
 iptables
 iputils
-mysql-community-server # NOPRIME
+mariadb # NOPRIME
 python-boto
 python-eventlet
 python-greenlet
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index ee4917d..c5a58b9 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -12,8 +12,7 @@
 qemu # NOPRIME
 libvirt # NOPRIME
 libvirt-python # NOPRIME
-libxml2-python
-mysql-community-server # NOPRIME
+mariadb # NOPRIME
 parted
 polkit
 python-M2Crypto
@@ -24,20 +23,19 @@
 python-SQLAlchemy
 python-Tempita
 python-boto
-python-carrot
 python-cheetah
 python-eventlet
 python-feedparser
 python-greenlet
 python-iso8601
 python-kombu
+python-libxml2
 python-lockfile
 python-lxml # needed for glance which is needed for nova --- this shouldn't be here
 python-mox
 python-mysql
 python-numpy # needed by websockify for spice console
 python-paramiko
-python-python-gflags
 python-sqlalchemy-migrate
 python-suds
 python-xattr # needed for glance which is needed for nova --- this shouldn't be here
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 3797b6c..6b426fb 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -1,4 +1,2 @@
 python-Sphinx
-python-gevent
-python-netifaces
-python-python-gflags
+python-eventlet
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index c91bac3..9cf580d 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,4 +1,4 @@
 selinux-policy-targeted
-mongodb-server
-pymongo
+mongodb-server # NOPRIME
+pymongo # NOPRIME
 mongodb # NOPRIME
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 199ae10..ce6181e 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,7 +1,6 @@
 lvm2
 scsi-target-utils
 qemu-img
-python-devel
 postgresql-devel
 iscsi-initiator-utils
-python-lxml         #dist:f18,f19,f20,rhel7
+python-lxml         #dist:f19,f20,rhel7
diff --git a/files/rpms/general b/files/rpms/general
index 6cfe31e..99be725 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -15,6 +15,7 @@
 python-prettytable # dist:rhel6 [1]
 python-unittest2
 python-virtualenv
+python-devel
 screen
 tar
 tcpdump
diff --git a/files/rpms/glance b/files/rpms/glance
index 534097a..f959c22 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -6,11 +6,10 @@
 openssl-devel       # testonly
 postgresql-devel    # testonly
 python-argparse
-python-devel
 python-eventlet
 python-greenlet
-python-lxml         #dist:f18,f19,f20,rhel7
-python-paste-deploy #dist:f18,f19,f20,rhel7
+python-lxml         #dist:f19,f20,rhel7
+python-paste-deploy #dist:f19,f20,rhel7
 python-routes
 python-sqlalchemy
 python-wsgiref      #dist:f18,f19,f20
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 59503cc..2dd24e0 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -16,8 +16,8 @@
 python-migrate
 python-mox
 python-nose
-python-paste        #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
+python-paste        #dist:f19,f20
+python-paste-deploy #dist:f19,f20
 python-routes
 python-sphinx
 python-sqlalchemy
diff --git a/files/rpms/ironic b/files/rpms/ironic
new file mode 100644
index 0000000..54b9829
--- /dev/null
+++ b/files/rpms/ironic
@@ -0,0 +1,9 @@
+libguestfs
+libvirt
+libvirt-python
+openssh-clients
+openvswitch
+python-libguestfs
+syslinux
+tftp-server
+xinetd
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 99e8524..7182091 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,9 +1,9 @@
 python-greenlet
 libxslt-devel       # dist:f20
-python-lxml         #dist:f18,f19,f20
-python-paste        #dist:f18,f19,f20
-python-paste-deploy #dist:f18,f19,f20
-python-paste-script #dist:f18,f19,f20
+python-lxml         #dist:f19,f20
+python-paste        #dist:f19,f20
+python-paste-deploy #dist:f19,f20
+python-paste-script #dist:f19,f20
 python-routes
 python-sqlalchemy
 python-webob
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 42d7f68..9fafecb 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -11,8 +11,8 @@
 python-iso8601
 python-kombu
 #rhel6 gets via pip
-python-paste        # dist:f18,f19,f20,rhel7
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste        # dist:f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
@@ -21,4 +21,3 @@
 qpid-cpp-server        # NOPRIME
 sqlite
 sudo
-vconfig
diff --git a/files/rpms/nova b/files/rpms/nova
index a607d92..e05d0d7 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -17,22 +17,20 @@
 parted
 polkit
 python-boto
-python-carrot
 python-cheetah
 python-eventlet
 python-feedparser
-python-gflags
 python-greenlet
 python-iso8601
 python-kombu
 python-lockfile
 python-migrate
 python-mox
-python-paramiko # dist:f18,f19,f20,rhel7
+python-paramiko # dist:f19,f20,rhel7
 # ^ on RHEL6, brings in python-crypto which conflicts with version from
 # pip we need
-python-paste        # dist:f18,f19,f20,rhel7
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste        # dist:f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-qpid
 python-routes
 python-sqlalchemy
@@ -42,4 +40,3 @@
 qpid-cpp-server # NOPRIME
 sqlite
 sudo
-vconfig
diff --git a/files/rpms/ryu b/files/rpms/ryu
index e8ed926..9b85080 100644
--- a/files/rpms/ryu
+++ b/files/rpms/ryu
@@ -1,4 +1,2 @@
-python-gevent
-python-gflags
-python-netifaces
+python-eventlet
 python-sphinx
diff --git a/files/rpms/swift b/files/rpms/swift
index 72253f7..ec53424 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -4,12 +4,11 @@
 memcached
 python-configobj
 python-coverage
-python-devel
 python-eventlet
 python-greenlet
 python-netifaces
 python-nose
-python-paste-deploy # dist:f18,f19,f20,rhel7
+python-paste-deploy # dist:f19,f20,rhel7
 python-simplejson
 python-webob
 pyxattr
diff --git a/functions b/functions
index 1d30922..80f98ad 100644
--- a/functions
+++ b/functions
@@ -122,7 +122,7 @@
             flat_fname="$(head -25 $IMAGE | { grep -G 'RW\|RDONLY [0-9]+ FLAT\|VMFS' $IMAGE || true; })"
             flat_fname="${flat_fname#*\"}"
             flat_fname="${flat_fname%?}"
-            if [[ -z "$flat_name" ]]; then
+            if [[ -z "$flat_fname" ]]; then
                 flat_fname="$IMAGE_NAME-flat.vmdk"
             fi
             path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
@@ -133,27 +133,16 @@
                 if [[ ! -f $FILES/$flat_fname || \
                 "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
                     wget -c $flat_url -O $FILES/$flat_fname
-                    if [[ $? -ne 0 ]]; then
-                        echo "Flat disk not found: $flat_url"
-                        flat_found=false
-                    fi
                 fi
-                if $flat_found; then
-                    IMAGE="$FILES/${flat_fname}"
-                fi
+                IMAGE="$FILES/${flat_fname}"
             else
                 IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
                 if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
                     echo "Flat disk not found: $flat_url"
-                    flat_found=false
-                fi
-                if ! $flat_found; then
-                    IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+                    return 1
                 fi
             fi
-            if $flat_found; then
-                IMAGE_NAME="${flat_fname}"
-            fi
+            IMAGE_NAME="${flat_fname}"
             vmdk_disktype="preallocated"
         elif [[ "$vmdk_create_type" = "streamOptimized" ]]; then
             vmdk_disktype="streamOptimized"
@@ -163,33 +152,27 @@
             if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
                 warn $LINENO "Expected filename suffix: '-flat'."`
                             `" Filename provided: ${IMAGE_NAME}"
-            fi
-
-            descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
-            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
-            flat_path="${image_url:0:$path_len}"
-            descriptor_url=$flat_path$descriptor_fname
-            warn $LINENO "$descriptor_data_pair_msg"`
-                            `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
-            if [[ $flat_path != file* ]]; then
-                if [[ ! -f $FILES/$descriptor_fname || \
-                "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
-                    wget -c $descriptor_url -O $FILES/$descriptor_fname
-                    if [[ $? -ne 0 ]]; then
-                        warn $LINENO "Descriptor not found $descriptor_url"
-                        descriptor_found=false
+            else
+                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+                flat_path="${image_url:0:$path_len}"
+                descriptor_url=$flat_path$descriptor_fname
+                warn $LINENO "$descriptor_data_pair_msg"`
+                                `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+                if [[ $flat_path != file* ]]; then
+                    if [[ ! -f $FILES/$descriptor_fname || \
+                    "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+                        wget -c $descriptor_url -O $FILES/$descriptor_fname
+                    fi
+                    descriptor_url="$FILES/$descriptor_fname"
+                else
+                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+                    if [[ ! -f $descriptor_url || \
+                    "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+                        echo "Descriptor not found: $descriptor_url"
+                        return 1
                     fi
                 fi
-                descriptor_url="$FILES/$descriptor_fname"
-            else
-                descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
-                if [[ ! -f $descriptor_url || \
-                "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
-                    warn $LINENO "Descriptor not found $descriptor_url"
-                    descriptor_found=false
-                fi
-            fi
-            if $descriptor_found; then
                 vmdk_adapter_type="$(head -25 $descriptor_url | { grep -a -F -m 1 'ddb.adapterType =' $descriptor_url || true; })"
                 vmdk_adapter_type="${vmdk_adapter_type#*\"}"
                 vmdk_adapter_type="${vmdk_adapter_type%?}"
@@ -216,7 +199,21 @@
     # and should not be decompressed prior to loading
     if [[ "$image_url" =~ '.vhd.tgz' ]]; then
         IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}"
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=ovf --disk-format=vhd < "${IMAGE}"
+        FORCE_VM_MODE=""
+        if [[ "$IMAGE_NAME" =~ 'cirros' ]]; then
+            # Cirros VHD image currently only boots in PV mode.
+            # Nova defaults to PV for all VHD images, but
+            # the glance setting is needed for booting
+            # directly from volume.
+            FORCE_VM_MODE="--property vm_mode=xen"
+        fi
+        glance \
+            --os-auth-token $token \
+            --os-image-url http://$GLANCE_HOSTPORT \
+            image-create \
+            --name "$IMAGE_NAME" --is-public=True \
+            --container-format=ovf --disk-format=vhd \
+            $FORCE_VM_MODE < "${IMAGE}"
         return
     fi
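If needed, the vm_mode property set via FORCE_VM_MODE can be verified after
upload; an illustrative check (assumes the same token/endpoint as the upload
above and that the client resolves the image by name):

    glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \
        image-show "$IMAGE_NAME" | grep vm_mode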
 
@@ -290,7 +287,7 @@
     esac
 
     if is_arch "ppc64"; then
-        IMG_PROPERTY="--property hw_disk_bus=scsi --property hw_cdrom_bus=scsi"
+        IMG_PROPERTY="--property hw_cdrom_bus=scsi"
     fi
 
     if [ "$CONTAINER_FORMAT" = "bare" ]; then
diff --git a/functions-common b/functions-common
index ed3d883..6340c5c 100644
--- a/functions-common
+++ b/functions-common
@@ -517,12 +517,14 @@
     GIT_DEST=$2
     GIT_REF=$3
     RECLONE=$(trueorfalse False $RECLONE)
+    local orig_dir=`pwd`
 
     if [[ "$OFFLINE" = "True" ]]; then
         echo "Running in offline mode, clones already exist"
         # print out the results so we know what change was used in the logs
         cd $GIT_DEST
         git show --oneline | head -1
+        cd $orig_dir
         return
     fi
 
@@ -572,6 +574,7 @@
     # print out the results so we know what change was used in the logs
     cd $GIT_DEST
     git show --oneline | head -1
+    cd $orig_dir
 }
 
 # git can sometimes get itself infinitely stuck with transient network
@@ -821,6 +824,10 @@
             if [[ ! $file_to_parse =~ neutron ]]; then
                 file_to_parse="${file_to_parse} neutron"
             fi
+        elif [[ $service == ir-* ]]; then
+            if [[ ! $file_to_parse =~ ironic ]]; then
+                file_to_parse="${file_to_parse} ironic"
+            fi
         fi
     done
 
@@ -1232,6 +1239,19 @@
         && $SUDO_PIP rm -rf ${pip_build_tmp}
 }
 
+# This should be used if you want to install globally; all libraries should
+# use this, especially *oslo* ones.
+function setup_install {
+    local project_dir=$1
+    setup_package_with_req_sync $project_dir
+}
+
+# This should be used for projects which run services, i.e. all the OpenStack services.
+function setup_develop {
+    local project_dir=$1
+    setup_package_with_req_sync $project_dir -e
+}
+
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
 #
@@ -1240,20 +1260,21 @@
 #
 # Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS``
 # setup_develop directory
-function setup_develop {
+function setup_package_with_req_sync {
     local project_dir=$1
+    local flags=$2
 
     # Don't update repo if local changes exist
     # Don't use buggy "git diff --quiet"
     # ``errexit`` requires us to trap the exit code when the repo is changed
     local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed")
 
-    if [[ $update_requirements = "changed" ]]; then
+    if [[ $update_requirements != "changed" ]]; then
         (cd $REQUIREMENTS_DIR; \
             $SUDO_CMD python update.py $project_dir)
     fi
 
-    setup_develop_no_requirements_update $project_dir
+    setup_package $project_dir $flags
 
     # We've just gone and possibly modified the user's source tree in an
     # automated way, which is considered bad form if it's a development
@@ -1264,7 +1285,7 @@
     # a variable that tells us whether or not we should UNDO the requirements
     # changes (this will be set to False in the OpenStack ci gate)
     if [ $UNDO_REQUIREMENTS = "True" ]; then
-        if [[ $update_requirements = "changed" ]]; then
+        if [[ $update_requirements != "changed" ]]; then
             (cd $project_dir && git reset --hard)
         fi
     fi
@@ -1274,12 +1295,15 @@
 # using pip before running `setup.py develop`
 # Uses globals ``STACK_USER``
 # setup_develop_no_requirements_update directory
-function setup_develop_no_requirements_update {
+function setup_package {
     local project_dir=$1
+    local flags=$2
 
-    pip_install -e $project_dir
+    pip_install $flags $project_dir
     # ensure that further actions can do things like setup.py sdist
-    safe_chown -R $STACK_USER $1/*.egg-info
+    if [[ "$flags" == "-e" ]]; then
+        safe_chown -R $STACK_USER $1/*.egg-info
+    fi
 }
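A brief usage sketch of the split above (the repo directory variables are
assumed to be the usual DevStack ones):

    # libraries: plain, global install
    setup_install $PBR_DIR
    # services: editable install so in-tree changes take effect immediately
    setup_develop $NOVA_DIR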
 
 
diff --git a/lib/baremetal b/lib/baremetal
index 1d02e1e..adcbe4c 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -129,7 +129,7 @@
 
 # Below this, we set some path and filenames.
 # Defaults are probably sufficient.
-BM_IMAGE_BUILD_DIR=${BM_IMAGE_BUILD_DIR:-$DEST/diskimage-builder}
+DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
 
 # Use DIB to create deploy ramdisk and kernel.
 BM_BUILD_DEPLOY_RAMDISK=`trueorfalse True $BM_BUILD_DEPLOY_RAMDISK`
@@ -140,7 +140,10 @@
 
 # If you need to add any extra flavors to the deploy ramdisk image
 # eg, specific network drivers, specify them here
-BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:-}
+#
+# NOTE(deva): this will be moved to lib/ironic in a future patch;
+#             for now, set the default to a suitable value for Ironic's needs
+BM_DEPLOY_FLAVOR=${BM_DEPLOY_FLAVOR:--a amd64 ubuntu deploy-ironic}
 
 # set URL and version for google shell-in-a-box
 BM_SHELL_IN_A_BOX=${BM_SHELL_IN_A_BOX:-http://shellinabox.googlecode.com/files/shellinabox-2.14.tar.gz}
@@ -162,7 +165,7 @@
 # Install diskimage-builder and shell-in-a-box
 # so that we can build the deployment kernel & ramdisk
 function prepare_baremetal_toolchain {
-    git_clone $BM_IMAGE_BUILD_REPO $BM_IMAGE_BUILD_DIR $BM_IMAGE_BUILD_BRANCH
+    git_clone $DIB_REPO $DIB_DIR $DIB_BUILD_BRANCH
 
     local shellinabox_basename=$(basename $BM_SHELL_IN_A_BOX)
     if [[ ! -e $DEST/$shellinabox_basename ]]; then
@@ -220,7 +223,7 @@
         BM_DEPLOY_KERNEL=bm-deploy.kernel
         BM_DEPLOY_RAMDISK=bm-deploy.initramfs
         if [ ! -e "$TOP_DIR/files/$BM_DEPLOY_KERNEL" -o ! -e "$TOP_DIR/files/$BM_DEPLOY_RAMDISK" ]; then
-            $BM_IMAGE_BUILD_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR deploy \
+            $DIB_DIR/bin/ramdisk-image-create $BM_DEPLOY_FLAVOR \
                 -o $TOP_DIR/files/bm-deploy
         fi
     fi
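With the default BM_DEPLOY_FLAVOR above, that ramdisk-image-create call
expands roughly to:

    $DIB_DIR/bin/ramdisk-image-create -a amd64 ubuntu deploy-ironic \
        -o $TOP_DIR/files/bm-deploy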
@@ -268,7 +271,7 @@
     image_name=$(basename "$file" ".qcow2")
 
     # this call returns the file names as "$kernel,$ramdisk"
-    out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
+    out=$($DIB_DIR/bin/disk-image-get-kernel \
             -x -d $TOP_DIR/files -o bm-deploy -i $file)
     if [ $? -ne 0 ]; then
         die $LINENO "Failed to get kernel and ramdisk from $file"
diff --git a/lib/ceilometer b/lib/ceilometer
index b0899e2..5030b3c 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -69,6 +69,11 @@
 
 # create_ceilometer_accounts() - Set up common required ceilometer accounts
 
+# Project              User         Roles
+# ------------------------------------------------------------------
+# SERVICE_TENANT_NAME  ceilometer   admin
+# SERVICE_TENANT_NAME  ceilometer   ResellerAdmin (if Swift is enabled)
+
 create_ceilometer_accounts() {
 
     SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
@@ -99,6 +104,13 @@
                 --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \
                 --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/"
         fi
+        if is_service_enabled swift; then
+            # Ceilometer needs ResellerAdmin role to access swift account stats.
+            openstack role add \
+                --project $SERVICE_TENANT_NAME \
+                --user ceilometer \
+                ResellerAdmin
+        fi
     fi
 }
 
@@ -106,7 +118,9 @@
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_ceilometer {
-    mongo ceilometer --eval "db.dropDatabase();"
+    if [ "$CEILOMETER_BACKEND" != 'mysql' ] && [ "$CEILOMETER_BACKEND" != 'postgresql' ] ; then
+        mongo ceilometer --eval "db.dropDatabase();"
+    fi
 }
 
 # configure_ceilometerclient() - Set config files, create data dirs, etc
@@ -161,17 +175,37 @@
         configure_mongodb
         cleanup_ceilometer
     fi
+
+    if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
+        iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere
+        iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP"
+        iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER"
+        iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD"
+    fi
 }
 
 function configure_mongodb {
+    # server package is the same on all
+    local packages=mongodb-server
+
     if is_fedora; then
-        # install mongodb client
-        install_package mongodb
+        # mongodb client + python bindings
+        packages="${packages} mongodb pymongo"
+    else
+        packages="${packages} python-pymongo"
+    fi
+
+    install_package ${packages}
+
+    if is_fedora; then
         # ensure smallfiles selected to minimize freespace requirements
         sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 
         restart_service mongod
     fi
+
+    # give mongodb time to start up
+    sleep 5
 }
 
 # init_ceilometer() - Initialize etc.
@@ -204,6 +238,9 @@
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
     fi
+    if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then
+        screen_it ceilometer-acompute "cd ; ceilometer-agent-compute --config-file $CEILOMETER_CONF"
+    fi
     screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
     screen_it ceilometer-anotification "cd ; ceilometer-agent-notification --config-file $CEILOMETER_CONF"
     screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
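For reference, the vsphere-related iniset calls above result in entries of
this shape in $CEILOMETER_CONF (the values shown are placeholders):

    [DEFAULT]
    hypervisor_inspector = vsphere

    [vmware]
    host_ip = <VMWAREAPI_IP>
    host_username = <VMWAREAPI_USER>
    host_password = <VMWAREAPI_PASSWORD>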
diff --git a/lib/databases/mysql b/lib/databases/mysql
index f5ee3c0..7a0145a 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -23,6 +23,7 @@
         stop_service $MYSQL
         apt_get purge -y mysql*
         sudo rm -rf /var/lib/mysql
+        sudo rm -rf /etc/mysql
         return
     elif is_fedora; then
         if [[ $DISTRO =~ (rhel7) ]]; then
diff --git a/lib/glance b/lib/glance
index 8a4c21b..51e4399 100644
--- a/lib/glance
+++ b/lib/glance
@@ -159,6 +159,49 @@
     cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
 }
 
+# create_glance_accounts() - Set up common required glance accounts
+
+# Project              User         Roles
+# ------------------------------------------------------------------
+# SERVICE_TENANT_NAME  glance       service
+# SERVICE_TENANT_NAME  glance-swift ResellerAdmin (if Swift is enabled)
+
+function create_glance_accounts {
+    if is_service_enabled g-api; then
+        openstack user create \
+            --password "$SERVICE_PASSWORD" \
+            --project $SERVICE_TENANT_NAME \
+            glance
+        openstack role add \
+            --project $SERVICE_TENANT_NAME \
+            --user glance \
+            service
+        # required for swift access
+        if is_service_enabled s-proxy; then
+            openstack user create \
+                --password "$SERVICE_PASSWORD" \
+                --project $SERVICE_TENANT_NAME \
+                glance-swift
+            openstack role add \
+                --project $SERVICE_TENANT_NAME \
+                --user glance-swift \
+                ResellerAdmin
+        fi
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            openstack service create \
+                --type image \
+                --description "Glance Image Service" \
+                glance
+            openstack endpoint create \
+                --region RegionOne \
+                --publicurl "http://$GLANCE_HOSTPORT" \
+                --adminurl "http://$GLANCE_HOSTPORT" \
+                --internalurl "http://$GLANCE_HOSTPORT" \
+                glance
+        fi
+    fi
+}
+
 # create_glance_cache_dir() - Part of the init_glance() process
 function create_glance_cache_dir {
     # Create cache dir
diff --git a/lib/heat b/lib/heat
index 2d9d863..f66f0a8 100644
--- a/lib/heat
+++ b/lib/heat
@@ -37,6 +37,10 @@
 HEAT_CONF=$HEAT_CONF_DIR/heat.conf
 HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
 HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
+HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
+
+# other default options
+HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
 
 # Tell Tempest this project is present
 TEMPEST_SERVICES+=,heat
@@ -45,6 +49,13 @@
 # Functions
 # ---------
 
+# Test if any Heat services are enabled
+# is_heat_enabled
+function is_heat_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0
+    return 1
+}
+
 # cleanup_heat() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_heat {
@@ -240,37 +251,42 @@
     # heat_stack_user role is for users created by Heat
     openstack role create heat_stack_user
 
-    # heat_stack_owner role is given to users who create Heat stacks,
-    # it's the default role used by heat to delegate to the heat service
-    # user (for performing deferred operations via trusts), see heat.conf
-    HEAT_OWNER_ROLE=$(openstack role create \
-        heat_stack_owner \
-        | grep " id " | get_field 2)
+    if [[ $HEAT_DEFERRED_AUTH == trusts ]]; then
+        # heat_stack_owner role is given to users who create Heat stacks,
+        # it's the default role used by heat to delegate to the heat service
+        # user (for performing deferred operations via trusts), see heat.conf
+        HEAT_OWNER_ROLE=$(openstack role create \
+            heat_stack_owner \
+            | grep " id " | get_field 2)
 
-    # Give the role to the demo and admin users so they can create stacks
-    # in either of the projects created by devstack
-    openstack role add $HEAT_OWNER_ROLE --project demo --user demo
-    openstack role add $HEAT_OWNER_ROLE --project demo --user admin
-    openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+        # Give the role to the demo and admin users so they can create stacks
+        # in either of the projects created by devstack
+        openstack role add $HEAT_OWNER_ROLE --project demo --user demo
+        openstack role add $HEAT_OWNER_ROLE --project demo --user admin
+        openstack role add $HEAT_OWNER_ROLE --project admin --user admin
+        iniset $HEAT_CONF DEFAULT deferred_auth_method trusts
+    fi
 
-    # Note we have to pass token/endpoint here because the current endpoint and
-    # version negotiation in OSC means just --os-identity-api-version=3 won't work
-    KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
-    D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
-        --os-identity-api-version=3 domain create heat \
-        --description "Owns users and projects created by heat" \
-        | grep ' id ' | get_field 2)
-    iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
+    if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then
+        # Note we have to pass token/endpoint here because the current endpoint and
+        # version negotiation in OSC means just --os-identity-api-version=3 won't work
+        KS_ENDPOINT_V3="$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v3"
+        D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            --os-identity-api-version=3 domain create heat \
+            --description "Owns users and projects created by heat" \
+            | grep ' id ' | get_field 2)
+        iniset $HEAT_CONF DEFAULT stack_user_domain ${D_ID}
 
-    openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
-        --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
-        --domain $D_ID heat_domain_admin \
-        --description "Manages users and projects created by heat"
-    openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
-        --os-identity-api-version=3 role add \
-        --user heat_domain_admin --domain ${D_ID} admin
-    iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
-    iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
+        openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
+            --domain $D_ID heat_domain_admin \
+            --description "Manages users and projects created by heat"
+        openstack --os-token $OS_TOKEN --os-url=$KS_ENDPOINT_V3 \
+            --os-identity-api-version=3 role add \
+            --user heat_domain_admin --domain ${D_ID} admin
+        iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
+        iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
+    fi
 }
 
 # Restore xtrace
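A minimal local.conf sketch for exercising the new toggles (illustrative;
assumes heat's deferred_auth_method also accepts "password"):

    [[local|localrc]]
    HEAT_STACK_DOMAIN=False
    HEAT_DEFERRED_AUTH=password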
diff --git a/lib/infra b/lib/infra
index 7f70ff2..e2f7dad 100644
--- a/lib/infra
+++ b/lib/infra
@@ -46,7 +46,7 @@
 
     # Install pbr
     git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
-    setup_develop $PBR_DIR
+    setup_install $PBR_DIR
 }
 
 # Restore xtrace
diff --git a/lib/ironic b/lib/ironic
index b346de1..979420f 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -18,16 +18,19 @@
 # - stop_ironic
 # - cleanup_ironic
 
-# Save trace setting
+# Save trace and pipefail settings
 XTRACE=$(set +o | grep xtrace)
+PIPEFAIL=$(set +o | grep pipefail)
 set +o xtrace
-
+set +o pipefail
 
 # Defaults
 # --------
 
 # Set up default directories
 IRONIC_DIR=$DEST/ironic
+IRONIC_DATA_DIR=$DATA_DIR/ironic
+IRONIC_STATE_PATH=/var/lib/ironic
 IRONICCLIENT_DIR=$DEST/python-ironicclient
 IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic}
 IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
@@ -35,6 +38,45 @@
 IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
 IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
 
+# Set up defaults for functional / integration testing
+IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts}
+IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates}
+IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False $IRONIC_BAREMETAL_BASIC_OPS)
+IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`}
+IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys}
+IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key}
+IRONIC_KEY_FILE=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME
+IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh}
+IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot}
+IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-2222}
+IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP}
+IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
+IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
+# NOTE(agordeev): neither the ubuntu nor the fedora deploy image works with 256MB of RAM.
+#                 The system halts and throws a kernel panic while unpacking the initramfs.
+#                 Ubuntu needs at least 384MB, but fedora requires 448,
+#                 so 512 is used here to satisfy both.
+IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-512}
+IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
+IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64}
+IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm}
+IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24}
+IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv}
+IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys}
+
+DIB_DIR=${DIB_DIR:-$DEST/diskimage-builder}
+
+# Use DIB to create deploy ramdisk and kernel.
+IRONIC_BUILD_DEPLOY_RAMDISK=`trueorfalse True $IRONIC_BUILD_DEPLOY_RAMDISK`
+# If DIB is not used, these files are used as the deploy ramdisk/kernel.
+# (The value must be an absolute path.)
+IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-}
+IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
+IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}
+
+# TODO(agordeev): replace 'ubuntu' with detection of the host distro name
+IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}
+
 # Support entry points installation of console scripts
 IRONIC_BIN_DIR=$(get_python_exec_prefix)
 
@@ -86,8 +128,8 @@
     iniset $IRONIC_CONF_FILE DEFAULT debug True
     inicomment $IRONIC_CONF_FILE DEFAULT log_file
     iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic`
+    iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH
     iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
-
     # Configure Ironic conductor, if it was enabled.
     if is_service_enabled ir-cond; then
         configure_ironic_conductor
@@ -97,6 +139,10 @@
     if is_service_enabled ir-api; then
         configure_ironic_api
     fi
+
+    if [[ "$IRONIC_BAREMETAL_BASIC_OPS" == "True" ]]; then
+        configure_ironic_auxiliary
+    fi
 }
 
 # configure_ironic_api() - Is used by configure_ironic(). Performs
@@ -125,6 +171,10 @@
     cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
     iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+    iniset $IRONIC_CONF_FILE conductor api_url http://$HOST_IP:6385
+    iniset $IRONIC_CONF_FILE pxe tftp_server $HOST_IP
+    iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
+    iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
 }
 
 # create_ironic_cache_dir() - Part of the init_ironic() process
@@ -225,9 +275,270 @@
     screen -S $SCREEN_NAME -p ir-cond -X kill
 }
 
+function is_ironic {
+    if ( is_service_enabled ir-cond && is_service_enabled ir-api ); then
+        return 0
+    fi
+    return 1
+}
 
-# Restore xtrace
+function configure_ironic_dirs {
+    sudo mkdir -p $IRONIC_DATA_DIR
+    sudo mkdir -p $IRONIC_STATE_PATH
+    sudo mkdir -p $IRONIC_TFTPBOOT_DIR
+    sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH
+    sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
+    if is_ubuntu; then
+        PXEBIN=/usr/lib/syslinux/pxelinux.0
+    elif is_fedora; then
+        PXEBIN=/usr/share/syslinux/pxelinux.0
+    fi
+    if [ ! -f $PXEBIN ]; then
+        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
+    fi
+
+    cp $PXEBIN $IRONIC_TFTPBOOT_DIR
+    mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
+}
+
+function create_bridge_and_vms {
+    # Call libvirt setup scripts in a new shell to ensure any new group membership
+    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
+    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-nodes \
+        $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
+        amd64 $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR" >> $IRONIC_VM_MACS_CSV_FILE
+}
+
+function enroll_vms {
+
+    CHASSIS_ID=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
+    IRONIC_NET_ID=$(neutron net-list | grep private | get_field 1)
+    local idx=0
+
+    # workaround: we need to know which netns neutron uses for the private network
+    neutron port-create private
+
+    while read MAC; do
+
+        NODE_ID=$(ironic node-create --chassis_uuid $CHASSIS_ID --driver pxe_ssh \
+            -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \
+            -i ssh_address=$IRONIC_VM_SSH_ADDRESS \
+            -i ssh_port=$IRONIC_VM_SSH_PORT \
+            -i ssh_username=$IRONIC_SSH_USERNAME \
+            -i ssh_key_filename=$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME \
+            -p cpus=$IRONIC_VM_SPECS_CPU \
+            -p memory_mb=$IRONIC_VM_SPECS_RAM \
+            -p local_gb=$IRONIC_VM_SPECS_DISK \
+            -p cpu_arch=x86_64 \
+            | grep " uuid " | get_field 2)
+
+        ironic port-create --address $MAC --node_uuid $NODE_ID
+
+        idx=$((idx+1))
+
+    done < $IRONIC_VM_MACS_CSV_FILE
+
+    # create the nova flavor
+    nova flavor-create baremetal auto $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK $IRONIC_VM_SPECS_CPU
+    nova flavor-key baremetal set "cpu_arch"="x86_64" "baremetal:deploy_kernel_id"="$IRONIC_DEPLOY_KERNEL_ID" "baremetal:deploy_ramdisk_id"="$IRONIC_DEPLOY_RAMDISK_ID"
+
+    # intentional sleep to make sure the tag has been set on the port
+    sleep 10
+    TAPDEV=$(sudo ip netns exec qdhcp-${IRONIC_NET_ID} ip link list | grep tap | cut -d':' -f2 | cut -b2-)
+    TAG_ID=$(sudo ovs-vsctl show |grep ${TAPDEV} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
+
+    # make sure the veth pair does not already exist; if it does, delete its links
+    sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
+    sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
+    # create veth pair for future interconnection between br-int and brbm
+    sudo ip link add brbm-tap1 type veth peer name ovs-tap1
+    sudo ip link set dev brbm-tap1 up
+    sudo ip link set dev ovs-tap1 up
+
+    sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$TAG_ID
+    sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
+}
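# Minimal sanity check, as a sketch assuming the default bridge names used
# above: each bridge should list its end of the veth pair after enroll_vms.
sudo ovs-vsctl list-ports br-int | grep ovs-tap1
sudo ovs-vsctl list-ports $IRONIC_VM_NETWORK_BRIDGE | grep brbm-tap1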
+
+function configure_tftpd {
+    # enable the tftp conntrack/NAT helpers to allow connections to SERVICE_HOST's tftp server
+    sudo modprobe nf_conntrack_tftp
+    sudo modprobe nf_nat_tftp
+
+    if is_ubuntu; then
+        PXEBIN=/usr/lib/syslinux/pxelinux.0
+    elif is_fedora; then
+        PXEBIN=/usr/share/syslinux/pxelinux.0
+    fi
+    if [ ! -f $PXEBIN ]; then
+        die $LINENO "pxelinux.0 (from SYSLINUX) not found."
+    fi
+
+    # stop tftpd and setup serving via xinetd
+    stop_service tftpd-hpa || true
+    [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override
+    sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp
+    sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp
+
+    # set up tftp file mapping to satisfy requests at the root (booting) and
+    # the /tftpboot/ sub-dir (as per the deploy-ironic element)
+    echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file
+    echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file
+
+    chmod -R 0755 $IRONIC_TFTPBOOT_DIR
+    restart_service xinetd
+}
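# Hedged manual check of the tftp setup (sketch; assumes the tftp-hpa client
# is installed and is run from a scratch directory): the map-file should let
# a bare "pxelinux.0" request resolve under $IRONIC_TFTPBOOT_DIR.
tftp $HOST_IP -c get pxelinux.0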
+
+function configure_ironic_ssh_keypair {
+    # Generate an ssh key pair for the stack user
+    if [[ ! -d $IRONIC_SSH_KEY_DIR ]]; then
+        mkdir -p $IRONIC_SSH_KEY_DIR
+    fi
+    if [[ ! -d $HOME/.ssh ]]; then
+        mkdir -p $HOME/.ssh
+        chmod 700 $HOME/.ssh
+    fi
+    echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE
+    cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE
+}
+
+function ironic_ssh_check {
+    local KEY_FILE=$1
+    local FLOATING_IP=$2
+    local PORT=$3
+    local DEFAULT_INSTANCE_USER=$4
+    local ACTIVE_TIMEOUT=$5
+    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -p $PORT -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success; do sleep 1; done"; then
+        die $LINENO "server didn't become ssh-able!"
+    fi
+}
+
+function configure_ironic_sshd {
+    # Ensure sshd server accepts connections from localhost only
+
+    SSH_CONFIG=/etc/ssh/sshd_config
+    HOST_PORT=$IRONIC_VM_SSH_ADDRESS:$IRONIC_VM_SSH_PORT
+    if ! sudo grep ListenAddress $SSH_CONFIG | grep $HOST_PORT; then
+        echo "ListenAddress $HOST_PORT" | sudo tee -a $SSH_CONFIG
+    fi
+
+    SSH_SERVICE_NAME=sshd
+    if is_ubuntu; then
+        SSH_SERVICE_NAME=ssh
+    fi
+
+    restart_service $SSH_SERVICE_NAME
+    # to ensure ssh service is up and running
+    sleep 3
+    ironic_ssh_check $IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10
+
+}
+
+function configure_ironic_auxiliary {
+    configure_ironic_dirs
+    configure_ironic_ssh_keypair
+    configure_ironic_sshd
+}
+
+# build deploy kernel+ramdisk, then upload them to glance
+# this function sets IRONIC_DEPLOY_KERNEL_ID and IRONIC_DEPLOY_RAMDISK_ID
+function upload_baremetal_ironic_deploy {
+    token=$1
+
+    if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then
+        IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy.kernel
+        IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy.initramfs
+    else
+        IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
+        IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
+    fi
+
+    if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then
+        # files don't exist, need to build them
+        if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
+            # we can build them only if we're not offline
+            if [ "$OFFLINE" != "True" ]; then
+                $DIB_DIR/bin/ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
+                    -o $TOP_DIR/files/ir-deploy
+            else
+                die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be built in OFFLINE mode"
+            fi
+        else
+            die $LINENO "Deploy kernel+ramdisk files don't exist and building them was explicitly disabled by IRONIC_BUILD_DEPLOY_RAMDISK"
+        fi
+    fi
+
+    # load them into glance
+    IRONIC_DEPLOY_KERNEL_ID=$(glance \
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
+        --is-public True --disk-format=aki \
+        < $IRONIC_DEPLOY_KERNEL_PATH  | grep ' id ' | get_field 2)
+    IRONIC_DEPLOY_RAMDISK_ID=$(glance \
+        --os-auth-token $token \
+        --os-image-url http://$GLANCE_HOSTPORT \
+        image-create \
+        --name $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
+        --is-public True --disk-format=ari \
+        < $IRONIC_DEPLOY_RAMDISK_PATH  | grep ' id ' | get_field 2)
+}
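# Hedged check that both deploy images were registered (sketch; assumes the
# usual OS_* credentials are exported in the current shell):
glance image-list | grep ir-deploy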
+
+function prepare_baremetal_basic_ops {
+
+    # install diskimage-builder
+    git_clone $DIB_REPO $DIB_DIR $DIB_BRANCH
+
+    # make sure all needed services are enabled
+    for srv in nova glance key neutron; do
+        if ! is_service_enabled "$srv"; then
+            die $LINENO "$srv should be enabled for ironic tests"
+        fi
+    done
+
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
+
+    # stop all nova services
+    stop_nova || true
+
+    # remove any nova service failure status files
+    find $SERVICE_DIR/$SCREEN_NAME -name 'n-*.failure' -exec rm -f '{}' \;
+
+    # start them again
+    start_nova_api
+    start_nova
+
+    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
+    die_if_not_set $LINENO TOKEN "Keystone failed to get token"
+
+    echo_summary "Creating and uploading baremetal images for ironic"
+
+    # build and upload separate deploy kernel & ramdisk
+    upload_baremetal_ironic_deploy $TOKEN
+
+    create_bridge_and_vms
+    enroll_vms
+    configure_tftpd
+}
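# Minimal localrc sketch to exercise this path (illustrative values only; the
# variables and services are the ones referenced above and in lib/ironic):
enable_service ir-api ir-cond
IRONIC_BAREMETAL_BASIC_OPS=True
VIRT_DRIVER=ironic
IRONIC_VM_COUNT=1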
+
+function cleanup_baremetal_basic_ops {
+    rm -f $IRONIC_VM_MACS_CSV_FILE
+    if [ -f $IRONIC_KEY_FILE ]; then
+        KEY=`cat $IRONIC_KEY_FILE.pub`
+        # remove public key from authorized_keys
+        grep -v "$KEY" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
+        chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
+    fi
+    sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH
+    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-nodes $IRONIC_VM_COUNT $IRONIC_VM_NETWORK_BRIDGE"
+    sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override
+    restart_service xinetd
+}
+
+# Restore xtrace + pipefail
 $XTRACE
+$PIPEFAIL
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/keystone b/lib/keystone
index c6856c9..b31cc57 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -266,9 +266,11 @@
 
 # Tenant               User       Roles
 # ------------------------------------------------------------------
-# service              --         --
-# --                   --         Member
 # admin                admin      admin
+# service              --         --
+# --                   --         service
+# --                   --         ResellerAdmin
+# --                   --         Member
 # demo                 admin      admin
 # demo                 demo       Member, anotherrole
 # invisible_to_admin   demo       Member
@@ -294,10 +296,17 @@
         --project $ADMIN_TENANT \
         --user $ADMIN_USER
 
-    # service
-    SERVICE_TENANT=$(openstack project create \
-        $SERVICE_TENANT_NAME \
-        | grep " id " | get_field 2)
+    # Create service project/role
+    openstack project create $SERVICE_TENANT_NAME
+
+    # Service role, so service users do not have to be admins
+    openstack role create service
+
+    # The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
+    # The admin role in swift allows a user to act as an admin for their tenant,
+    # but ResellerAdmin is needed for a user to act as any tenant. The name of this
+    # role is also configurable in swift-proxy.conf
+    openstack role create ResellerAdmin
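# Hedged illustration of granting one of the new roles to a service user;
# the Swift case in lib/nova later in this patch does exactly this:
openstack role add --project $SERVICE_TENANT_NAME --user nova ResellerAdmin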
 
     # The Member role is used by Horizon and Swift so we need to keep it:
     MEMBER_ROLE=$(openstack role create \
diff --git a/lib/marconi b/lib/marconi
index 3c4547f..fd1c351 100644
--- a/lib/marconi
+++ b/lib/marconi
@@ -154,7 +154,7 @@
 
 # start_marconi() - Start running processes, including screen
 function start_marconi {
-    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF 2>&1"
+    screen_it marconi-server "marconi-server --config-file $MARCONI_CONF"
     echo "Waiting for Marconi to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $MARCONI_SERVICE_PROTOCOL://$MARCONI_SERVICE_HOST:$MARCONI_SERVICE_PORT/v1/health; do sleep 1; done"; then
         die $LINENO "Marconi did not start"
diff --git a/lib/neutron b/lib/neutron
index bb591ab..294ffac 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -110,6 +110,10 @@
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 # nova vif driver that all plugins should use
 NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGE:-True}
+Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
 
 # The next two variables are configured by plugin
 # e.g.  _configure_neutron_l3_agent or lib/neutron_plugins/*
@@ -313,6 +317,9 @@
     if is_service_enabled q-meta; then
         iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
     fi
+
+    iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
 # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
@@ -754,6 +761,16 @@
         iniset $NEUTRON_CONF DEFAULT ${I/=/ }
     done
 
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_change $Q_NOTIFY_NOVA_PORT_STATUS_CHANGE
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_ON_PORT_DATA_CHANGES
+    iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2"
+    iniset $NEUTRON_CONF DEFAULT nova_admin_username nova
+    iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD
+    ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }")
+    iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID
+    iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url  "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+
     # Configure plugin
     neutron_plugin_configure_service
 }
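# The notification and VIF-plugging knobs above can be overridden from
# localrc; a sketch with non-default values (illustrative only):
Q_NOTIFY_NOVA_PORT_STATUS_CHANGE=False
VIF_PLUGGING_IS_FATAL=False
VIF_PLUGGING_TIMEOUT=600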
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 4cb0da8..efdd9ef 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -2,7 +2,7 @@
 # ------------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+BS_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -38,7 +38,12 @@
 }
 
 function neutron_plugin_configure_plugin_agent {
-    :
+    # Set up integration bridge
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+    iniset /$Q_PLUGIN_CONF_FILE restproxyagent integration_bridge $OVS_BRIDGE
+    AGENT_BINARY="$NEUTRON_DIR/neutron/plugins/bigswitch/agent/restproxy_agent.py"
+
+    _neutron_ovs_base_configure_firewall_driver
 }
 
 function neutron_plugin_configure_service {
@@ -61,7 +66,7 @@
 
 function has_neutron_plugin_security_group {
     # 1 means False here
-    return 1
+    return 0
 }
 
 function neutron_plugin_check_adv_test_requirements {
@@ -69,4 +74,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$BS_XTRACE
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index a1b089e..7f7c049 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+CISCO_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Specify the VSM parameters
@@ -324,4 +324,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$CISCO_XTRACE
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 62f9737..cce108a 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+EMBR_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch
@@ -37,4 +37,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$EMBR_XTRACE
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
index 22c8578..3aef9d0 100644
--- a/lib/neutron_plugins/ibm
+++ b/lib/neutron_plugins/ibm
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+IBM_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -130,4 +130,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$IBM_XTRACE
diff --git a/lib/neutron_plugins/linuxbridge b/lib/neutron_plugins/linuxbridge
index 362fd5b..96b14f1 100644
--- a/lib/neutron_plugins/linuxbridge
+++ b/lib/neutron_plugins/linuxbridge
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+LBRIDGE_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/linuxbridge_agent
@@ -53,4 +53,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$LBRIDGE_XTRACE
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 742e3b2..c5373d6 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -6,7 +6,7 @@
 MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+MN_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function is_neutron_ovs_base_plugin {
@@ -84,4 +84,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$MN_XTRACE
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index e985dcb..db43fcf 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -2,7 +2,7 @@
 # ------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+ML2_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Enable this to simply and quickly enable tunneling with ML2.
@@ -119,4 +119,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$ML2_XTRACE
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index 6d4bfca..d76f7d4 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NEC_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Configuration parameters
@@ -127,4 +127,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NEC_XTRACE
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
new file mode 100644
index 0000000..86f09d2
--- /dev/null
+++ b/lib/neutron_plugins/nuage
@@ -0,0 +1,69 @@
+# Nuage Neutron Plugin
+# ----------------------
+
+# Save trace setting
+NU_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function neutron_plugin_create_nova_conf {
+    NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
+    iniset $NOVA_CONF DEFAULT neutron_ovs_bridge $NOVA_OVS_BRIDGE
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+}
+
+function neutron_plugin_install_agent_packages {
+    :
+}
+
+function neutron_plugin_configure_common {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nuage
+    Q_PLUGIN_CONF_FILENAME=nuage_plugin.ini
+    Q_DB_NAME="nuage_neutron"
+    Q_PLUGIN_CLASS="neutron.plugins.nuage.plugin.NuagePlugin"
+    Q_PLUGIN_EXTENSIONS_PATH=neutron/plugins/nuage/extensions
+    # Nuage-specific Neutron defaults. Actual values must be set and sourced
+    NUAGE_CNA_SERVERS=${NUAGE_CNA_SERVERS:-'localhost:8443'}
+    NUAGE_CNA_SERVER_AUTH=${NUAGE_CNA_SERVER_AUTH:-'username:password'}
+    NUAGE_CNA_ORGANIZATION=${NUAGE_CNA_ORGANIZATION:-'org'}
+    NUAGE_CNA_SERVER_SSL=${NUAGE_CNA_SERVER_SSL:-'True'}
+    NUAGE_CNA_BASE_URI=${NUAGE_CNA_BASE_URI:-'/'}
+    NUAGE_CNA_AUTH_RESOURCE=${NUAGE_CNA_AUTH_RESOURCE:-'/'}
+    NUAGE_CNA_DEF_NETPART_NAME=${NUAGE_CNA_DEF_NETPART_NAME:-''}
+}
+
+function neutron_plugin_configure_debug_command {
+    :
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    :
+}
+
+function neutron_plugin_configure_l3_agent {
+    :
+}
+
+function neutron_plugin_configure_plugin_agent {
+    :
+}
+
+function neutron_plugin_configure_service {
+    iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nuage/extensions/
+    iniset /$Q_PLUGIN_CONF_FILE restproxy base_uri $NUAGE_CNA_BASE_URI
+    iniset /$Q_PLUGIN_CONF_FILE restproxy serverssl $NUAGE_CNA_SERVER_SSL
+    iniset /$Q_PLUGIN_CONF_FILE restproxy serverauth $NUAGE_CNA_SERVER_AUTH
+    iniset /$Q_PLUGIN_CONF_FILE restproxy organization $NUAGE_CNA_ORGANIZATION
+    iniset /$Q_PLUGIN_CONF_FILE restproxy server $NUAGE_CNA_SERVERS
+    iniset /$Q_PLUGIN_CONF_FILE restproxy auth_resource $NUAGE_CNA_AUTH_RESOURCE
+    iniset /$Q_PLUGIN_CONF_FILE restproxy default_net_partition_name $NUAGE_CNA_DEF_NETPART_NAME
+}
+
+function has_neutron_plugin_security_group {
+    # 1 means False here
+    return 1
+}
+
+# Restore xtrace
+$NU_XTRACE
diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent
index 724df41..6610ea3 100644
--- a/lib/neutron_plugins/ofagent_agent
+++ b/lib/neutron_plugins/ofagent_agent
@@ -2,7 +2,7 @@
 # ----------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OFA_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -91,4 +91,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OFA_XTRACE
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
new file mode 100644
index 0000000..06f1eee
--- /dev/null
+++ b/lib/neutron_plugins/oneconvergence
@@ -0,0 +1,76 @@
+# Neutron One Convergence plugin
+# ---------------------------
+# Save trace setting
+OC_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/neutron_plugins/ovs_base
+
+Q_L3_ENABLED=true
+Q_L3_ROUTER_PER_TENANT=true
+Q_USE_NAMESPACE=true
+
+function neutron_plugin_install_agent_packages {
+    _neutron_ovs_base_install_agent_packages
+}
+# Configure common parameters
+function neutron_plugin_configure_common {
+
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
+    Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
+    Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
+    Q_DB_NAME='oc_nvsd_neutron'
+}
+
+# Configure plugin specific information
+function neutron_plugin_configure_service {
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
+    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
+}
+
+function neutron_plugin_configure_debug_command {
+    _neutron_ovs_base_configure_debug_command
+}
+
+function neutron_plugin_setup_interface_driver {
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+}
+
+function has_neutron_plugin_security_group {
+    # 1 means False here
+    return 0
+}
+
+function setup_integration_bridge {
+    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
+}
+
+function neutron_plugin_configure_dhcp_agent {
+    setup_integration_bridge
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+}
+
+function neutron_plugin_configure_l3_agent {
+    _neutron_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
+}
+
+function neutron_plugin_configure_plugin_agent {
+
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
+
+    _neutron_ovs_base_configure_firewall_driver
+}
+
+function neutron_plugin_create_nova_conf {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
+        setup_integration_bridge
+    fi
+}
+
+# Restore xtrace
+$OC_XTRACE
diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch
index bdbc5a9..c644fed 100644
--- a/lib/neutron_plugins/openvswitch
+++ b/lib/neutron_plugins/openvswitch
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OVS_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch_agent
@@ -57,4 +57,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OVS_XTRACE
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 3a2bdc3..33ca17a 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -2,7 +2,7 @@
 # -----------------------------
 
 # Save trace setting
-PLUGIN_XTRACE=$(set +o | grep xtrace)
+OVSA_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -128,4 +128,4 @@
 }
 
 # Restore xtrace
-$PLUGIN_XTRACE
+$OVSA_XTRACE
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 0a2ba58..ae7f815 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -2,7 +2,7 @@
 # -------------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+OVSB_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
@@ -77,4 +77,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$OVSB_XTRACE
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index 19f94cb..67080f4 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -3,7 +3,7 @@
 # ------------------------------------
 
 # Save trace settings
-MY_XTRACE=$(set +o | grep xtrace)
+PG_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function neutron_plugin_create_nova_conf {
@@ -52,4 +52,4 @@
     is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
 }
 # Restore xtrace
-$MY_XTRACE
+$PG_XTRACE
diff --git a/lib/neutron_plugins/ryu b/lib/neutron_plugins/ryu
index 9ae36d3..ceb89fa 100644
--- a/lib/neutron_plugins/ryu
+++ b/lib/neutron_plugins/ryu
@@ -2,7 +2,7 @@
 # ------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+RYU_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -77,4 +77,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$RYU_XTRACE
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index ab6c324..b5253db 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+FW_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin
@@ -24,4 +24,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$FW_XTRACE
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 531f52f..78e7738 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+LB_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -48,4 +48,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$LB_XTRACE
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 0e5f75b..51123e2 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+METER_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -27,4 +27,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$METER_XTRACE
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index e56d361..d920ba6 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -2,7 +2,7 @@
 # ---------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+VPN_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -30,4 +30,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$VPN_XTRACE
diff --git a/lib/neutron_plugins/vmware_nsx b/lib/neutron_plugins/vmware_nsx
index fe79354..f2f8735 100644
--- a/lib/neutron_plugins/vmware_nsx
+++ b/lib/neutron_plugins/vmware_nsx
@@ -2,7 +2,7 @@
 # -------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NSX_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -146,4 +146,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NSX_XTRACE
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
index f03de56..033731e 100644
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ b/lib/neutron_thirdparty/bigswitch_floodlight
@@ -2,7 +2,7 @@
 # ------------------------------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+BS3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
@@ -49,4 +49,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$BS3_XTRACE
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index ad417bb..099a66e 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -17,7 +17,7 @@
 MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+MN3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 function configure_midonet {
@@ -46,4 +46,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$MN3_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 424a900..bbe227e 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -2,7 +2,7 @@
 # -----------------------
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+RYU3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -18,14 +18,8 @@
 # Ryu Applications
 RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
 
-# configure_ryu can be called multiple times as neutron_pluing/ryu may call
-# this function for neutron-ryu-agent
-_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
 function configure_ryu {
-    if [[ "$_RYU_CONFIGURED" == "False" ]]; then
-        setup_develop $RYU_DIR
-        _RYU_CONFIGURED=True
-    fi
+    :
 }
 
 function init_ryu {
@@ -63,6 +57,7 @@
 function install_ryu {
     if [[ "$_RYU_INSTALLED" == "False" ]]; then
         git_clone $RYU_REPO $RYU_DIR $RYU_BRANCH
+        export PYTHONPATH=$RYU_DIR:$PYTHONPATH
         _RYU_INSTALLED=True
     fi
 }
@@ -80,4 +75,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$RYU3_XTRACE
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index d465ac7..f829aa8 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -13,7 +13,7 @@
 TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master}
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+TREMA3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 TREMA_DIR=${TREMA_DIR:-$DEST/trema}
@@ -114,4 +114,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$TREMA3_XTRACE
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 3fecc62..7a76570 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -11,7 +11,7 @@
 # * NSX_GATEWAY_NETWORK_CIDR         --> CIDR to configure br-ex, e.g. 172.24.4.211/24
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+NSX3_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
 # This is the interface that connects the Devstack instance
@@ -83,4 +83,4 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$NSX3_XTRACE
diff --git a/lib/nova b/lib/nova
index f5e0d11..5cc94ec 100644
--- a/lib/nova
+++ b/lib/nova
@@ -139,7 +139,7 @@
 # Test if any Nova Cell services are enabled
 # is_nova_enabled
 function is_n-cell_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"n-cell-" ]] && return 0
+    [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0
     return 1
 }
 
@@ -308,7 +308,7 @@
     # Rebuild the config file from scratch
     create_nova_conf
 
-    if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         # Configure hypervisor plugin
         configure_nova_hypervisor
     fi
@@ -316,9 +316,10 @@
 
 # create_nova_accounts() - Set up common required nova accounts
 
-# Tenant               User       Roles
+# Project              User         Roles
 # ------------------------------------------------------------------
-# service              nova       admin, [ResellerAdmin (swift only)]
+# SERVICE_TENANT_NAME  nova         admin
+# SERVICE_TENANT_NAME  nova         ResellerAdmin (if Swift is enabled)
 
 # Migrated from keystone_data.sh
 create_nova_accounts() {
@@ -363,6 +364,48 @@
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v3"
         fi
     fi
+
+    if is_service_enabled n-api; then
+        # Swift
+        if is_service_enabled swift; then
+            # Nova needs ResellerAdmin role to download images when accessing
+            # swift through the s3 api.
+            openstack role add \
+                --project $SERVICE_TENANT_NAME \
+                --user nova \
+                ResellerAdmin
+        fi
+
+        # EC2
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
+            openstack service create \
+                --type ec2 \
+                --description "EC2 Compatibility Layer" \
+                ec2
+            openstack endpoint create \
+                --region RegionOne \
+                --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
+                --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
+                --internalurl "http://$SERVICE_HOST:8773/services/Cloud" \
+                ec2
+        fi
+    fi
+
+    # S3
+    if is_service_enabled n-obj swift3; then
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            openstack service create \
+                --type s3 \
+                --description "S3" \
+                s3
+            openstack endpoint create \
+                --region RegionOne \
+                --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+                --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+                --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+                s3
+        fi
+    fi
 }
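# Hedged check that the EC2/S3 endpoints were registered (sketch; uses the
# legacy keystone CLI that this patch already relies on elsewhere):
keystone endpoint-list | grep -E "8773|$S3_SERVICE_PORT"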
 
 # create_nova_conf() - Create a new nova.conf file
@@ -715,19 +758,28 @@
     start_nova_rest
 }
 
-# stop_nova() - Stop running processes (non-screen)
-function stop_nova {
-    # Kill the nova screen windows
-    # Some services are listed here twice since more than one instance
-    # of a service may be running in certain configs.
-    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
-        screen_stop $serv
-    done
+function stop_nova_compute {
+    screen_stop n-cpu
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
         stop_nova_hypervisor
     fi
 }
 
+function stop_nova_rest {
+    # Kill the nova screen windows
+    # Some services are listed here twice since more than one instance
+    # of a service may be running in certain configs.
+    for serv in n-api n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta n-obj; do
+        screen_stop $serv
+    done
+}
+
+# stop_nova() - Stop running processes (non-screen)
+function stop_nova {
+    stop_nova_rest
+    stop_nova_compute
+}
+
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
new file mode 100644
index 0000000..1f2b239
--- /dev/null
+++ b/lib/nova_plugins/functions-libvirt
@@ -0,0 +1,125 @@
+# lib/nova_plugins/functions-libvirt
+# Common libvirt configuration functions
+
+# Dependencies:
+# ``functions`` file
+# ``STACK_USER`` has to be defined
+
+# Save trace setting
+LV_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# -------
+
+# if we should turn on massive libvirt debugging
+DEBUG_LIBVIRT=$(trueorfalse False $DEBUG_LIBVIRT)
+
+# Installs required distro-specific libvirt packages.
+function install_libvirt {
+    if is_ubuntu; then
+        install_package qemu-kvm
+        install_package libvirt-bin
+        install_package python-libvirt
+        install_package python-guestfs
+    elif is_fedora || is_suse; then
+        install_package kvm
+        install_package libvirt
+        install_package libvirt-python
+        install_package python-libguestfs
+    fi
+}
+
+# Configures the installed libvirt system so that it is accessible by
+# STACK_USER via qemu:///system with management capabilities.
+function configure_libvirt {
+    if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
+        # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
+        cat <<EOF | sudo tee -a $QEMU_CONF
+cgroup_device_acl = [
+    "/dev/null", "/dev/full", "/dev/zero",
+    "/dev/random", "/dev/urandom",
+    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
+    "/dev/rtc", "/dev/hpet","/dev/net/tun",
+]
+EOF
+    fi
+
+    if is_ubuntu; then
+        LIBVIRT_DAEMON=libvirt-bin
+    else
+        LIBVIRT_DAEMON=libvirtd
+    fi
+
+    if is_fedora || is_suse; then
+        if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
+            cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+[libvirt Management Access]
+Identity=unix-group:$LIBVIRT_GROUP
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
+EOF
+        elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
+            # openSUSE < 12.3 or SLE
+            # Work around the fact that polkit-default-privs overrules pklas
+            # with 'unix-group:$group'.
+            cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
+[libvirt Management Access]
+Identity=unix-user:$STACK_USER
+Action=org.libvirt.unix.manage
+ResultAny=yes
+ResultInactive=yes
+ResultActive=yes
+EOF
+        else
+            # Starting with fedora 18 and opensuse-12.3 enable stack-user to
+            # virsh -c qemu:///system by creating a policy-kit rule for
+            # stack-user using the new Javascript syntax
+            rules_dir=/etc/polkit-1/rules.d
+            sudo mkdir -p $rules_dir
+            cat <<EOF | sudo tee $rules_dir/50-libvirt-$STACK_USER.rules
+polkit.addRule(function(action, subject) {
+    if (action.id == 'org.libvirt.unix.manage' &&
+        subject.user == '$STACK_USER') {
+        return polkit.Result.YES;
+    }
+});
+EOF
+            unset rules_dir
+        fi
+    fi
+
+    # The user that nova runs as needs to be a member of the **libvirtd** group, otherwise
+    # nova-compute will be unable to use libvirt.
+    if ! getent group $LIBVIRT_GROUP >/dev/null; then
+        sudo groupadd $LIBVIRT_GROUP
+    fi
+    add_user_to_group $STACK_USER $LIBVIRT_GROUP
+
+    # Enable server side traces for libvirtd
+    if [[ "$DEBUG_LIBVIRT" = "True" ]] ; then
+        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+        if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+        if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
+            echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        fi
+    fi
+
+    # libvirt detects various settings on startup; since we potentially changed
+    # the system configuration (modules, filesystems), we need to restart
+    # libvirt to pick up those changes.
+    restart_service $LIBVIRT_DAEMON
+}
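# Hedged verification that the group/polkit setup took effect (sketch): this
# should list domains for $STACK_USER without prompting for authentication.
sudo -u $STACK_USER virsh -c qemu:///system list --all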
+
+
+# Restore xtrace
+$LV_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
deleted file mode 100644
index fd3c4fe..0000000
--- a/lib/nova_plugins/hypervisor-docker
+++ /dev/null
@@ -1,132 +0,0 @@
-# lib/nova_plugins/docker
-# Configure the Docker hypervisor
-
-# Enable with:
-#
-#   VIRT_DRIVER=docker
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``nova`` and ``glance`` configurations
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default directories
-DOCKER_DIR=$DEST/docker
-
-DOCKER_UNIX_SOCKET=/var/run/docker.sock
-DOCKER_PID_FILE=/var/run/docker.pid
-DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042}
-
-DOCKER_IMAGE=${DOCKER_IMAGE:-cirros:latest}
-DOCKER_IMAGE_NAME=$DEFAULT_IMAGE_NAME
-DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-registry:latest}
-DOCKER_REGISTRY_IMAGE_NAME=registry
-DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
-
-DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu}
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor {
-    stop_service docker
-
-    # Clean out work area
-    sudo rm -rf /var/lib/docker
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor {
-    iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver
-    iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
-}
-
-# is_docker_running - Return 0 (true) if Docker is running, otherwise 1
-function is_docker_running {
-    local docker_pid
-    if [ -f "$DOCKER_PID_FILE" ]; then
-        docker_pid=$(cat "$DOCKER_PID_FILE")
-    fi
-    if [[ -z "$docker_pid" ]] || ! ps -p "$docker_pid" | grep [d]ocker; then
-        return 1
-    fi
-    return 0
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor {
-    # So far this is Ubuntu only
-    if ! is_ubuntu; then
-        die $LINENO "Docker is only supported on Ubuntu at this time"
-    fi
-
-    # Make sure Docker is installed
-    if ! is_package_installed lxc-docker; then
-        die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
-    fi
-
-    if ! (is_docker_running); then
-        die $LINENO "Docker not running"
-    fi
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor {
-    if ! (is_docker_running); then
-        die $LINENO "Docker not running"
-    fi
-
-    # Start the Docker registry container
-    docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \
-        -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \
-        -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \
-        -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \
-        -e OS_AUTH_URL=${OS_AUTH_URL} \
-        $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh
-
-    echo "Waiting for docker registry to start..."
-    DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then
-        die $LINENO "docker-registry did not start"
-    fi
-
-    # Tag image if not already tagged
-    if ! docker images | grep $DOCKER_REPOSITORY_NAME; then
-        docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME
-    fi
-
-    # Make sure we copied the image in Glance
-    if ! (glance image-show "$DOCKER_IMAGE"); then
-        docker push $DOCKER_REPOSITORY_NAME
-    fi
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor {
-    # Stop the docker registry container
-    docker kill $(docker ps | grep docker-registry | cut -d' ' -f1)
-}
-
-
-# Restore xtrace
-$MY_XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
new file mode 100644
index 0000000..e72f7c1
--- /dev/null
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -0,0 +1,80 @@
+# lib/nova_plugins/hypervisor-ironic
+# Configure the ironic hypervisor
+
+# Enable with:
+# VIRT_DRIVER=ironic
+
+# Dependencies:
+# ``functions`` file
+# ``nova`` configuration
+
+# install_nova_hypervisor - install any external requirements
+# configure_nova_hypervisor - make configuration changes, including those to other services
+# start_nova_hypervisor - start any external services
+# stop_nova_hypervisor - stop any external services
+# cleanup_nova_hypervisor - remove transient data and cache
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/nova_plugins/functions-libvirt
+
+# Defaults
+# --------
+
+# Entry Points
+# ------------
+
+# clean_nova_hypervisor - Clean up an installation
+function cleanup_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+# configure_nova_hypervisor - Set config files, create data dirs, etc
+function configure_nova_hypervisor {
+    configure_libvirt
+    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+
+    # NOTE(adam_g): The ironic compute driver currently lives in the ironic
+    # tree.  We purposely configure Nova to load it from there until it moves
+    # back into Nova proper.
+    iniset $NOVA_CONF DEFAULT compute_driver ironic.nova.virt.ironic.IronicDriver
+    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
+    iniset $NOVA_CONF DEFAULT scheduler_host_manager ironic.nova.scheduler.ironic_host_manager.IronicHostManager
+    iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
+    iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
+    # ironic section
+    iniset $NOVA_CONF ironic admin_username admin
+    iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD
+    iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+    iniset $NOVA_CONF ironic admin_tenant_name demo
+    iniset $NOVA_CONF ironic api_endpoint http://$SERVICE_HOST:6385/v1
+    iniset $NOVA_CONF ironic sql_connection `database_connection_url nova_bm`
+}
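# Sketch of inspecting the resulting [ironic] section after this runs (the
# grep context size is arbitrary):
grep -A6 '^\[ironic\]' $NOVA_CONF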
+
+# install_nova_hypervisor() - Install external components
+function install_nova_hypervisor {
+    install_libvirt
+}
+
+# start_nova_hypervisor - Start any required external services
+function start_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+# stop_nova_hypervisor - Stop any external services
+function stop_nova_hypervisor {
+    # This function intentionally left blank
+    :
+}
+
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 26880e5..053df3c 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -7,7 +7,6 @@
 # Dependencies:
 # ``functions`` file
 # ``nova`` configuration
-# ``STACK_USER`` has to be defined
 
 # install_nova_hypervisor - install any external requirements
 # configure_nova_hypervisor - make configuration changes, including those to other services
@@ -19,6 +18,7 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+source $TOP_DIR/lib/nova_plugins/functions-libvirt
 
 # Defaults
 # --------
@@ -38,86 +38,7 @@
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor {
-    if is_service_enabled neutron && is_neutron_ovs_base_plugin && ! sudo grep -q '^cgroup_device_acl' $QEMU_CONF; then
-        # Add /dev/net/tun to cgroup_device_acls, needed for type=ethernet interfaces
-        cat <<EOF | sudo tee -a $QEMU_CONF
-cgroup_device_acl = [
-    "/dev/null", "/dev/full", "/dev/zero",
-    "/dev/random", "/dev/urandom",
-    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
-    "/dev/rtc", "/dev/hpet","/dev/net/tun",
-]
-EOF
-    fi
-
-    if is_ubuntu; then
-        LIBVIRT_DAEMON=libvirt-bin
-    else
-        LIBVIRT_DAEMON=libvirtd
-    fi
-
-    if is_fedora || is_suse; then
-        if is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -le "17" ]]; then
-            cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
-[libvirt Management Access]
-Identity=unix-group:$LIBVIRT_GROUP
-Action=org.libvirt.unix.manage
-ResultAny=yes
-ResultInactive=yes
-ResultActive=yes
-EOF
-        elif is_suse && [[ $os_RELEASE = 12.2 || "$os_VENDOR" = "SUSE LINUX" ]]; then
-            # openSUSE < 12.3 or SLE
-            # Work around the fact that polkit-default-privs overrules pklas
-            # with 'unix-group:$group'.
-            cat <<EOF | sudo tee /etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
-[libvirt Management Access]
-Identity=unix-user:$STACK_USER
-Action=org.libvirt.unix.manage
-ResultAny=yes
-ResultInactive=yes
-ResultActive=yes
-EOF
-        else
-            # Starting with fedora 18 and opensuse-12.3 enable stack-user to
-            # virsh -c qemu:///system by creating a policy-kit rule for
-            # stack-user using the new Javascript syntax
-            rules_dir=/etc/polkit-1/rules.d
-            sudo mkdir -p $rules_dir
-            cat <<EOF | sudo tee $rules_dir/50-libvirt-$STACK_USER.rules
-polkit.addRule(function(action, subject) {
-    if (action.id == 'org.libvirt.unix.manage' &&
-        subject.user == '$STACK_USER') {
-        return polkit.Result.YES;
-    }
-});
-EOF
-            unset rules_dir
-        fi
-    fi
-
-    # The user that nova runs as needs to be member of **libvirtd** group otherwise
-    # nova-compute will be unable to use libvirt.
-    if ! getent group $LIBVIRT_GROUP >/dev/null; then
-        sudo groupadd $LIBVIRT_GROUP
-    fi
-    add_user_to_group $STACK_USER $LIBVIRT_GROUP
-
-    # Enable server side traces for libvirtd
-    local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
-    local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
-    if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then
-        echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-    fi
-    if ! grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then
-        echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
-    fi
-
-    # libvirt detects various settings on startup, as we potentially changed
-    # the system configuration (modules, filesystems), we need to restart
-    # libvirt to detect those changes.
-    restart_service $LIBVIRT_DAEMON
-
+    configure_libvirt
     iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
     iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
     iniset $NOVA_CONF DEFAULT use_usb_tablet "False"
@@ -146,17 +67,7 @@
 
 # install_nova_hypervisor() - Install external components
 function install_nova_hypervisor {
-    if is_ubuntu; then
-        install_package kvm
-        install_package libvirt-bin
-        install_package python-libvirt
-        install_package python-guestfs
-    elif is_fedora || is_suse; then
-        install_package kvm
-        install_package libvirt
-        install_package libvirt-python
-        install_package python-libguestfs
-    fi
+    install_libvirt
 
     # Install and configure **LXC** if specified.  LXC is another approach to
     # splitting a system into many smaller parts.  LXC uses cgroups and chroot
diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere
index b04aeda..9933a3c 100644
--- a/lib/nova_plugins/hypervisor-vsphere
+++ b/lib/nova_plugins/hypervisor-vsphere
@@ -39,7 +39,7 @@
     iniset $NOVA_CONF vmware host_ip "$VMWAREAPI_IP"
     iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER"
     iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD"
-    iniset $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER"
+    iniset_multiline $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER"
     if is_service_enabled neutron; then
         iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE
     fi
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index 10bda2c..c37969b 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -63,9 +63,13 @@
     local ssh_dom0
     ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip"
 
+    # Find where the plugins should go in dom0
+    xen_functions=`cat $TOP_DIR/tools/xen/functions`
+    PLUGIN_DIR=`$ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location"`
+
     # install nova plugins to dom0
     tar -czf - -C $NOVA_DIR/plugins/xenserver/xenapi/etc/xapi.d/plugins/ ./ |
-        $ssh_dom0 'tar -xzf - -C /etc/xapi.d/plugins/ && chmod a+x /etc/xapi.d/plugins/*'
+        $ssh_dom0 "tar -xzf - -C $PLUGIN_DIR && chmod a+x $PLUGIN_DIR/*"
 
     # install console logrotate script
     tar -czf - -C $NOVA_DIR/tools/xenserver/ rotate_xen_guest_logs.sh |
diff --git a/lib/opendaylight b/lib/opendaylight
index ca81c20..1022e2c 100644
--- a/lib/opendaylight
+++ b/lib/opendaylight
@@ -134,7 +134,7 @@
     # The flags to ODL have the following meaning:
     #   -of13: runs ODL using OpenFlow 1.3 protocol support.
     #   -virt ovsdb: Runs ODL in "virtualization" mode with OVSDB support
-    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVE_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
+    screen_it odl-server "cd $ODL_DIR/opendaylight && JAVA_HOME=$JHOME ./run.sh $ODL_ARGS -of13 -virt ovsdb"
 
     # Sleep a bit to let OpenDaylight finish starting up
     sleep $ODL_BOOT_WAIT
diff --git a/lib/oslo b/lib/oslo
index 8ef179c..3cf7218 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -39,28 +39,28 @@
     cleanup_oslo
 
     git_clone $CLIFF_REPO $CLIFF_DIR $CLIFF_BRANCH
-    setup_develop $CLIFF_DIR
+    setup_install $CLIFF_DIR
 
     git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
-    setup_develop $OSLOCFG_DIR
+    setup_install $OSLOCFG_DIR
 
     git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH
-    setup_develop $OSLOMSG_DIR
+    setup_install $OSLOMSG_DIR
 
     git_clone $OSLORWRAP_REPO $OSLORWRAP_DIR $OSLORWRAP_BRANCH
-    setup_develop $OSLORWRAP_DIR
+    setup_install $OSLORWRAP_DIR
 
     git_clone $OSLOVMWARE_REPO $OSLOVMWARE_DIR $OSLOVMWARE_BRANCH
-    setup_develop $OSLOVMWARE_DIR
+    setup_install $OSLOVMWARE_DIR
 
     git_clone $PYCADF_REPO $PYCADF_DIR $PYCADF_BRANCH
-    setup_develop $PYCADF_DIR
+    setup_install $PYCADF_DIR
 
     git_clone $STEVEDORE_REPO $STEVEDORE_DIR $STEVEDORE_BRANCH
-    setup_develop $STEVEDORE_DIR
+    setup_install $STEVEDORE_DIR
 
     git_clone $TASKFLOW_REPO $TASKFLOW_DIR $TASKFLOW_BRANCH
-    setup_develop $TASKFLOW_DIR
+    setup_install $TASKFLOW_DIR
 }
 
 # cleanup_oslo() - purge possibly old versions of oslo
diff --git a/lib/sahara b/lib/sahara
new file mode 100644
index 0000000..1ff0cf9
--- /dev/null
+++ b/lib/sahara
@@ -0,0 +1,172 @@
+# lib/sahara
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_sahara
+# configure_sahara
+# start_sahara
+# stop_sahara
+# cleanup_sahara
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git}
+SAHARA_BRANCH=${SAHARA_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DIR=$DEST/sahara
+SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara}
+SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf
+
+SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST}
+SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386}
+SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+
+SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara}
+
+# Support entry points installation of console scripts
+if [[ -d $SAHARA_DIR/bin ]]; then
+    SAHARA_BIN_DIR=$SAHARA_DIR/bin
+else
+    SAHARA_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Tell Tempest this project is present
+TEMPEST_SERVICES+=,sahara
+
+# Functions
+# ---------
+
+# create_sahara_accounts() - Set up common required sahara accounts
+#
+# Tenant      User       Roles
+# ------------------------------
+# service     sahara    admin
+function create_sahara_accounts {
+
+    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
+
+    SAHARA_USER=$(openstack user create \
+        sahara \
+        --password "$SERVICE_PASSWORD" \
+        --project $SERVICE_TENANT \
+        --email sahara@example.com \
+        | grep " id " | get_field 2)
+    openstack role add \
+        $ADMIN_ROLE \
+        --project $SERVICE_TENANT \
+        --user $SAHARA_USER
+
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        SAHARA_SERVICE=$(openstack service create \
+            sahara \
+            --type=data_processing \
+            --description="Sahara Data Processing" \
+            | grep " id " | get_field 2)
+        openstack endpoint create \
+            $SAHARA_SERVICE \
+            --region RegionOne \
+            --publicurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+            --adminurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
+            --internalurl "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s"
+    fi
+}
+
+# cleanup_sahara() - Remove residual data files, anything left over from
+# previous runs that would need to be cleaned up.
+function cleanup_sahara {
+
+    # Cleanup auth cache dir
+    sudo rm -rf $SAHARA_AUTH_CACHE_DIR
+}
+
+# configure_sahara() - Set config files, create data dirs, etc
+function configure_sahara {
+
+    if [[ ! -d $SAHARA_CONF_DIR ]]; then
+        sudo mkdir -p $SAHARA_CONF_DIR
+    fi
+    sudo chown $STACK_USER $SAHARA_CONF_DIR
+
+    # Copy over sahara configuration file and configure common parameters.
+    cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE
+
+    # Create auth cache dir
+    sudo mkdir -p $SAHARA_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR
+    rm -rf $SAHARA_AUTH_CACHE_DIR/*
+
+    # Set obsolete keystone auth configs for backward compatibility
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
+    iniset $SAHARA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_username sahara
+    iniset $SAHARA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
+
+    # Set actual keystone auth configs
+    iniset $SAHARA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_user sahara
+    iniset $SAHARA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $SAHARA_CONF_FILE keystone_authtoken signing_dir $SAHARA_AUTH_CACHE_DIR
+    iniset $SAHARA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
+
+    iniset $SAHARA_CONF_FILE DEFAULT verbose True
+    iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+
+    iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara`
+
+    if is_service_enabled neutron; then
+        iniset $SAHARA_CONF_FILE DEFAULT use_neutron true
+        iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true
+    fi
+
+    if is_service_enabled heat; then
+        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat
+    else
+        iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct
+    fi
+
+    iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG
+
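+    # (Re)create the sahara database and run the migrations up to head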
+    recreate_database sahara utf8
+    $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head
+}
+
+# install_sahara() - Collect source and prepare
+function install_sahara {
+    git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH
+    setup_develop $SAHARA_DIR
+}
+
+# start_sahara() - Start running processes, including screen
+function start_sahara {
+    screen_it sahara "cd $SAHARA_DIR && $SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE"
+}
+
+# stop_sahara() - Stop running processes
+function stop_sahara {
+    # Kill the Sahara screen windows
+    screen -S $SCREEN_NAME -p sahara -X kill
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/sahara-dashboard b/lib/sahara-dashboard
new file mode 100644
index 0000000..a81df0f
--- /dev/null
+++ b/lib/sahara-dashboard
@@ -0,0 +1,72 @@
+# lib/sahara-dashboard
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_HOST``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_sahara_dashboard
+# - configure_sahara_dashboard
+# - cleanup_sahara_dashboard
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/horizon
+
+# Defaults
+# --------
+
+# Set up default repos
+SAHARA_DASHBOARD_REPO=${SAHARA_DASHBOARD_REPO:-${GIT_BASE}/openstack/sahara-dashboard.git}
+SAHARA_DASHBOARD_BRANCH=${SAHARA_DASHBOARD_BRANCH:-master}
+
+SAHARA_PYTHONCLIENT_REPO=${SAHARA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git}
+SAHARA_PYTHONCLIENT_BRANCH=${SAHARA_PYTHONCLIENT_BRANCH:-master}
+
+# Set up default directories
+SAHARA_DASHBOARD_DIR=$DEST/sahara-dashboard
+SAHARA_PYTHONCLIENT_DIR=$DEST/python-saharaclient
+
+# Functions
+# ---------
+
+function configure_sahara_dashboard {
+
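+    # Register the Sahara dashboard with Horizon's settings files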
+    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    echo -e "HORIZON_CONFIG['dashboards'] += ('sahara',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+    echo -e "INSTALLED_APPS += ('saharadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+
+    if is_service_enabled neutron; then
+        echo -e "SAHARA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    fi
+}
+
+# install_sahara_dashboard() - Collect source and prepare
+function install_sahara_dashboard {
+    install_python_saharaclient
+    git_clone $SAHARA_DASHBOARD_REPO $SAHARA_DASHBOARD_DIR $SAHARA_DASHBOARD_BRANCH
+    setup_develop $SAHARA_DASHBOARD_DIR
+}
+
+function install_python_saharaclient {
+    git_clone $SAHARA_PYTHONCLIENT_REPO $SAHARA_PYTHONCLIENT_DIR $SAHARA_PYTHONCLIENT_BRANCH
+    setup_develop $SAHARA_PYTHONCLIENT_DIR
+}
+
+# cleanup_sahara_dashboard() - Remove the Sahara entries from Horizon's settings.py
+function cleanup_sahara_dashboard {
+    sed -i '/sahara/d' $HORIZON_DIR/openstack_dashboard/settings.py
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/savanna b/lib/savanna
deleted file mode 100644
index 2cb092c..0000000
--- a/lib/savanna
+++ /dev/null
@@ -1,173 +0,0 @@
-# lib/savanna
-
-# Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# install_savanna
-# configure_savanna
-# start_savanna
-# stop_savanna
-# cleanup_savanna
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git}
-SAVANNA_BRANCH=${SAVANNA_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DIR=$DEST/savanna
-SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
-SAVANNA_CONF_FILE=${SAVANNA_CONF_DIR}/savanna.conf
-SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
-
-SAVANNA_SERVICE_HOST=${SAVANNA_SERVICE_HOST:-$SERVICE_HOST}
-SAVANNA_SERVICE_PORT=${SAVANNA_SERVICE_PORT:-8386}
-SAVANNA_SERVICE_PROTOCOL=${SAVANNA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-
-SAVANNA_AUTH_CACHE_DIR=${SAVANNA_AUTH_CACHE_DIR:-/var/cache/savanna}
-
-# Support entry points installation of console scripts
-if [[ -d $SAVANNA_DIR/bin ]]; then
-    SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
-else
-    SAVANNA_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-# Tell Tempest this project is present
-TEMPEST_SERVICES+=,savanna
-
-
-# Functions
-# ---------
-
-# create_savanna_accounts() - Set up common required savanna accounts
-#
-# Tenant      User       Roles
-# ------------------------------
-# service     savanna    admin
-function create_savanna_accounts {
-
-    SERVICE_TENANT=$(openstack project list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-    ADMIN_ROLE=$(openstack role list | awk "/ admin / { print \$2 }")
-
-    SAVANNA_USER=$(openstack user create \
-        savanna \
-        --password "$SERVICE_PASSWORD" \
-        --project $SERVICE_TENANT \
-        --email savanna@example.com \
-        | grep " id " | get_field 2)
-    openstack role add \
-        $ADMIN_ROLE \
-        --project $SERVICE_TENANT \
-        --user $SAVANNA_USER
-
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        SAVANNA_SERVICE=$(openstack service create \
-            savanna \
-            --type=data_processing \
-            --description="Savanna Data Processing" \
-            | grep " id " | get_field 2)
-        openstack endpoint create \
-            $SAVANNA_SERVICE \
-            --region RegionOne \
-            --publicurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            --adminurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s" \
-            --internalurl "$SAVANNA_SERVICE_PROTOCOL://$SAVANNA_SERVICE_HOST:$SAVANNA_SERVICE_PORT/v1.1/\$(tenant_id)s"
-    fi
-}
-
-# cleanup_savanna() - Remove residual data files, anything left over from
-# previous runs that would need to clean up.
-function cleanup_savanna {
-
-    # Cleanup auth cache dir
-    sudo rm -rf $SAVANNA_AUTH_CACHE_DIR
-}
-
-# configure_savanna() - Set config files, create data dirs, etc
-function configure_savanna {
-
-    if [[ ! -d $SAVANNA_CONF_DIR ]]; then
-        sudo mkdir -p $SAVANNA_CONF_DIR
-    fi
-    sudo chown $STACK_USER $SAVANNA_CONF_DIR
-
-    # Copy over savanna configuration file and configure common parameters.
-    cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_FILE
-
-    # Create auth cache dir
-    sudo mkdir -p $SAVANNA_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $SAVANNA_AUTH_CACHE_DIR
-    rm -rf $SAVANNA_AUTH_CACHE_DIR/*
-
-    # Set obsolete keystone auth configs for backward compatibility
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_host $KEYSTONE_SERVICE_HOST
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_port $KEYSTONE_SERVICE_PORT
-    iniset $SAVANNA_CONF_FILE DEFAULT os_auth_protocol $KEYSTONE_SERVICE_PROTOCOL
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_password $SERVICE_PASSWORD
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_username savanna
-    iniset $SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
-
-    # Set actual keystone auth configs
-    iniset $SAVANNA_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_user savanna
-    iniset $SAVANNA_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $SAVANNA_CONF_FILE keystone_authtoken signing_dir $SAVANNA_AUTH_CACHE_DIR
-    iniset $SAVANNA_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
-
-    iniset $SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
-
-    iniset $SAVANNA_CONF_FILE database connection `database_connection_url savanna`
-
-    if is_service_enabled neutron; then
-        iniset $SAVANNA_CONF_FILE DEFAULT use_neutron true
-        iniset $SAVANNA_CONF_FILE DEFAULT use_floating_ips true
-    fi
-
-    if is_service_enabled heat; then
-        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine heat
-    else
-        iniset $SAVANNA_CONF_FILE DEFAULT infrastructure_engine savanna
-    fi
-
-    iniset $SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
-
-    recreate_database savanna utf8
-    $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_FILE upgrade head
-}
-
-# install_savanna() - Collect source and prepare
-function install_savanna {
-    git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH
-    setup_develop $SAVANNA_DIR
-}
-
-# start_savanna() - Start running processes, including screen
-function start_savanna {
-    screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_FILE"
-}
-
-# stop_savanna() - Stop running processes
-function stop_savanna {
-    # Kill the Savanna screen windows
-    screen -S $SCREEN_NAME -p savanna -X kill
-}
-
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
deleted file mode 100644
index 6fe15a3..0000000
--- a/lib/savanna-dashboard
+++ /dev/null
@@ -1,72 +0,0 @@
-# lib/savanna-dashboard
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_HOST``
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_savanna_dashboard
-# - configure_savanna_dashboard
-# - cleanup_savanna_dashboard
-
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/horizon
-
-# Defaults
-# --------
-
-# Set up default repos
-SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git}
-SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master}
-
-SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git}
-SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
-
-# Set up default directories
-SAVANNA_DASHBOARD_DIR=$DEST/savanna-dashboard
-SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
-
-# Functions
-# ---------
-
-function configure_savanna_dashboard {
-
-    echo -e "AUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-    echo -e "INSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
-
-    if is_service_enabled neutron; then
-        echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
-    fi
-}
-
-# install_savanna_dashboard() - Collect source and prepare
-function install_savanna_dashboard {
-    install_python_savannaclient
-    git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH
-    setup_develop $SAVANNA_DASHBOARD_DIR
-}
-
-function install_python_savannaclient {
-    git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH
-    setup_develop $SAVANNA_PYTHONCLIENT_DIR
-}
-
-# Cleanup file settings.py from Savanna
-function cleanup_savanna_dashboard {
-    sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py
-}
-
-# Restore xtrace
-$XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
-
diff --git a/lib/stackforge b/lib/stackforge
index dca08cc..e6528af 100644
--- a/lib/stackforge
+++ b/lib/stackforge
@@ -40,10 +40,10 @@
     cleanup_stackforge
 
     git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH
-    setup_develop_no_requirements_update $WSME_DIR
+    setup_package $WSME_DIR
 
     git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH
-    setup_develop_no_requirements_update $PECAN_DIR
+    setup_package $PECAN_DIR
 }
 
 # cleanup_stackforge() - purge possibly old versions of stackforge libraries
diff --git a/lib/swift b/lib/swift
index b655440..3e183ff 100644
--- a/lib/swift
+++ b/lib/swift
@@ -334,11 +334,12 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
 
-    # Configure Ceilometer
-    if is_service_enabled ceilometer; then
-        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
-        SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
-    fi
+    # Skipped due to bug 1294789
+    ## Configure Ceilometer
+    #if is_service_enabled ceilometer; then
+    #    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
+    #    SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
+    #fi
 
     # Restrict the length of auth tokens in the swift proxy-server logs.
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
@@ -454,6 +455,9 @@
     sudo chown -R ${STACK_USER}:adm ${swift_log_dir}
     sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
         tee /etc/rsyslog.d/10-swift.conf
+    # reload rsyslog so it picks up the new configuration
+    sudo killall -HUP rsyslogd
+
     if is_apache_enabled_service swift; then
         _config_swift_apache_wsgi
     fi
@@ -485,7 +489,7 @@
     truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}
 
     # Make a fresh XFS filesystem
-    mkfs.xfs -f -i size=1024  ${SWIFT_DISK_IMAGE}
+    /sbin/mkfs.xfs -f -i size=1024  ${SWIFT_DISK_IMAGE}
 
     # Mount the disk with mount options to make it as efficient as possible
     mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
@@ -627,8 +631,6 @@
 
 # start_swift() - Start running processes, including screen
 function start_swift {
-    # (re)start rsyslog
-    restart_service rsyslog
     # (re)start memcached to make sure we have a clean memcache.
     restart_service memcached
 
diff --git a/lib/tempest b/lib/tempest
index c74f00d..a4558ce 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -277,7 +277,6 @@
     fi
 
     # Compute
-    iniset $TEMPEST_CONFIG compute change_password_available False
     iniset $TEMPEST_CONFIG compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME
@@ -289,10 +288,14 @@
     iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
-    iniset $TEMPEST_CONFIG compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
-    iniset $TEMPEST_CONFIG compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
 
+    # Compute Features
+    iniset $TEMPEST_CONFIG compute-feature-enabled resize True
+    iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
+    iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
+    iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+
     # Compute admin
     iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME
     iniset $TEMPEST_CONFIG "compute-admin" password "$password"
@@ -310,6 +313,9 @@
     iniset $TEMPEST_CONFIG boto ec2_url "http://$SERVICE_HOST:8773/services/Cloud"
     iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
     iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
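+    # Point the boto tests at the cirros 0.3.1 EC2-style image manifests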
+    iniset $TEMPEST_CONFIG boto ari_manifest cirros-0.3.1-x86_64-initrd.manifest.xml
+    iniset $TEMPEST_CONFIG boto ami_manifest cirros-0.3.1-x86_64-blank.img.manifest.xml
+    iniset $TEMPEST_CONFIG boto aki_manifest cirros-0.3.1-x86_64-vmlinuz.manifest.xml
     iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
     iniset $TEMPEST_CONFIG boto http_socket_timeout 30
     iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
@@ -369,6 +375,30 @@
     $errexit
 }
 
+# create_tempest_accounts() - Set up common required tempest accounts
+#
+# Project              User         Roles
+# ------------------------------------------------------------------
+# alt_demo             alt_demo     Member
+#
+# Migrated from keystone_data.sh
+function create_tempest_accounts {
+    if is_service_enabled tempest; then
+        # Tempest has some tests that validate various authorization checks
+        # between two regular users in separate tenants
+        openstack project create \
+            alt_demo
+        openstack user create \
+            --project alt_demo \
+            --password "$ADMIN_PASSWORD" \
+            alt_demo
+        openstack role add \
+            --project alt_demo \
+            --user alt_demo \
+            Member
+    fi
+}
+
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
diff --git a/lib/tls b/lib/tls
index 072059d..88e5f60 100644
--- a/lib/tls
+++ b/lib/tls
@@ -348,7 +348,7 @@
     local key=${!key_var}
     local ca=${!ca_var}
 
-    if [[ !($cert && $key && $ca) ]]; then
+    if [[ -z "$cert" || -z "$key" || -z "$ca" ]]; then
         die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
                     "variable to enable SSL for ${service}"
     fi
diff --git a/lib/trove b/lib/trove
index 75b990f..42d2219 100644
--- a/lib/trove
+++ b/lib/trove
@@ -147,6 +147,9 @@
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove`
     iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT nova_compute_url $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT cinder_url $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT swift_url http://$SERVICE_HOST:8080/v1/AUTH_
 
     iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD
     sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
@@ -164,6 +167,9 @@
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_compute_url $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT cinder_url $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT swift_url http://$SERVICE_HOST:8080/v1/AUTH_
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
         setup_trove_logging $TROVE_CONF_DIR/trove-taskmanager.conf
     fi
diff --git a/run_tests.sh b/run_tests.sh
index a0bfbee..b1aef4f 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -15,6 +15,23 @@
 #
 # this runs a series of unit tests for devstack to ensure it's functioning
 
+PASSES=""
+FAILURES=""
+
+# Check the return code and add the test to PASSES or FAILURES as appropriate
+# pass_fail <result> <expected> <name>
+function pass_fail {
+    local result=$1
+    local expected=$2
+    local test_name=$3
+
+    if [[ $result -ne $expected ]]; then
+        FAILURES="$FAILURES $test_name"
+    else
+        PASSES="$PASSES $test_name"
+    fi
+}
+
 if [[ -n $@ ]]; then
     FILES=$@
 else
@@ -27,3 +44,30 @@
 echo "Running bash8..."
 
 ./tools/bash8.py -v $FILES
+pass_fail $? 0 bash8
+
+
+# Test that no one is trying to land crazy refs as branches
+
+echo "Ensuring we don't have crazy refs"
+
+REFS=`grep BRANCH stackrc | grep -v -- '-master'`
+rc=$?
+pass_fail $rc 1 crazy-refs
+if [[ $rc -eq 0 ]]; then
+    echo "Branch defaults must be master. Found:"
+    echo $REFS
+fi
+
+echo "====================================================================="
+for script in $PASSES; do
+    echo PASS $script
+done
+for script in $FAILURES; do
+    echo FAILED $script
+done
+echo "====================================================================="
+
+if [[ -n "$FAILURES" ]]; then
+    exit 1
+fi
diff --git a/stack.sh b/stack.sh
index 817da26..c715a85 100755
--- a/stack.sh
+++ b/stack.sh
@@ -142,7 +142,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f18|f19|f20|opensuse-12.2|rhel6) ]]; then
+if [[ ! ${DISTRO} =~ (precise|saucy|trusty|7.0|wheezy|sid|testing|jessie|f19|f20|rhel6) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -195,6 +195,7 @@
 # Some binaries might be under /sbin or /usr/sbin, so make sure sudo will
 # see them by forcing PATH
 echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE
+echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE
 chmod 0440 $TEMPFILE
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
@@ -538,8 +539,9 @@
                     cmd | getline now
                     close("date +\"%Y-%m-%d %H:%M:%S.%3N | \"")
                     sub(/^/, now)
-                    print
                     print > logfile
+                    fflush(logfile)
+                    print
                     fflush("")
                 }' ) 2>&1
         # Set up a second fd for output
@@ -593,7 +595,9 @@
 function exit_trap {
     local r=$?
     jobs=$(jobs -p)
-    if [[ -n $jobs ]]; then
+    # Only do the kill when we're logging through a process substitution,
+    # which is currently only used for the verbose logfile
+    if [[ -n $jobs && -n "$LOGFILE" && "$VERBOSE" == "True" ]]; then
         echo "exit_trap: cleaning up child processes"
         kill 2>&1 $jobs
     fi
@@ -880,7 +884,7 @@
 # -------
 
 # A better kind of sysstat, with the top process per time slice
-DSTAT_OPTS="-tcndylp --top-cpu-adv"
+DSTAT_OPTS="-tcmndrylp --top-cpu-adv"
 if [[ -n ${SCREEN_LOGDIR} ]]; then
     screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $SCREEN_LOGDIR/$DSTAT_FILE"
 else
@@ -907,14 +911,13 @@
         SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
     fi
 
-    # Do the keystone-specific bits from keystone_data.sh
-    export OS_SERVICE_TOKEN=$SERVICE_TOKEN
-    export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
-    # Add temporarily to make openstackclient work
+    # Setup OpenStackclient token-flow auth
     export OS_TOKEN=$SERVICE_TOKEN
     export OS_URL=$SERVICE_ENDPOINT
+
     create_keystone_accounts
     create_nova_accounts
+    create_glance_accounts
     create_cinder_accounts
     create_neutron_accounts
 
@@ -922,7 +925,7 @@
         create_ceilometer_accounts
     fi
 
-    if is_service_enabled swift || is_service_enabled s-proxy; then
+    if is_service_enabled swift; then
         create_swift_accounts
     fi
 
@@ -930,20 +933,14 @@
         create_heat_accounts
     fi
 
-    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
-    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
-    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
-        bash -x $FILES/keystone_data.sh
-
-    # Set up auth creds now that keystone is bootstrapped
+    # Begone token-flow auth
     unset OS_TOKEN OS_URL
+
+    # Set up password-flow auth creds now that keystone is bootstrapped
     export OS_AUTH_URL=$SERVICE_ENDPOINT
     export OS_TENANT_NAME=admin
     export OS_USERNAME=admin
     export OS_PASSWORD=$ADMIN_PASSWORD
-    unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
 fi
 
 
@@ -1134,15 +1131,9 @@
 
 # Create an access key and secret key for nova ec2 register image
 if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
-    NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
-    die_if_not_set $LINENO NOVA_USER_ID "Failure retrieving NOVA_USER_ID for nova"
-    NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
-    die_if_not_set $LINENO NOVA_TENANT_ID "Failure retrieving NOVA_TENANT_ID for $SERVICE_TENANT_NAME"
-    CREDS=$(keystone ec2-credentials-create --user-id $NOVA_USER_ID --tenant-id $NOVA_TENANT_ID)
-    ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
-    SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
-    iniset $NOVA_CONF DEFAULT s3_access_key "$ACCESS_KEY"
-    iniset $NOVA_CONF DEFAULT s3_secret_key "$SECRET_KEY"
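+    # The '-f shell' output is a set of access=.../secret=... assignments; eval them into this shell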
+    eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret)
+    iniset $NOVA_CONF DEFAULT s3_access_key "$access"
+    iniset $NOVA_CONF DEFAULT s3_secret_key "$secret"
     iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
 fi
 
@@ -1355,12 +1346,14 @@
     echo_summary "WARNING: $DEPRECATED_TEXT"
 fi
 
+# TODO(dtroyer): Remove EXTRA_OPTS after stable/icehouse branch is cut
 # Specific warning for deprecated configs
 if [[ -n "$EXTRA_OPTS" ]]; then
     echo ""
     echo_summary "WARNING: EXTRA_OPTS is used"
     echo "You are using EXTRA_OPTS to pass configuration into nova.conf."
     echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
+    echo "EXTRA_OPTS will be removed early in the Juno development cycle"
     echo "
 [[post-config|\$NOVA_CONF]]
 [DEFAULT]
@@ -1371,11 +1364,13 @@
     done
 fi
 
+# TODO(dtroyer): Remove EXTRA_BAREMETAL_OPTS after stable/icehouse branch is cut
 if [[ -n "$EXTRA_BAREMETAL_OPTS" ]]; then
     echo ""
-    echo_summary "WARNING: EXTRA_OPTS is used"
-    echo "You are using EXTRA_OPTS to pass configuration into nova.conf."
+    echo_summary "WARNING: EXTRA_BAREMETAL_OPTS is used"
+    echo "You are using EXTRA_BAREMETAL_OPTS to pass configuration into nova.conf."
     echo "Please convert that configuration in localrc to a nova.conf section in local.conf:"
+    echo "EXTRA_BAREMETAL_OPTS will be removed early in the Juno development cycle"
     echo "
 [[post-config|\$NOVA_CONF]]
 [baremetal]
@@ -1386,13 +1381,49 @@
     done
 fi
 
+# TODO(dtroyer): Remove Q_AGENT_EXTRA_AGENT_OPTS after stable/juno branch is cut
+if [[ -n "$Q_AGENT_EXTRA_AGENT_OPTS" ]]; then
+    echo ""
+    echo_summary "WARNING: Q_AGENT_EXTRA_AGENT_OPTS is used"
+    echo "You are using Q_AGENT_EXTRA_AGENT_OPTS to pass configuration into $NEUTRON_CONF."
+    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+    echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
+    echo "
+[[post-config|/\$Q_PLUGIN_CONF_FILE]]
+[agent]
+"
+    for I in "${Q_AGENT_EXTRA_AGENT_OPTS[@]}"; do
+        # Print the option as it should appear in local.conf
+        echo ${I}
+    done
+fi
+
+# TODO(dtroyer): Remove Q_AGENT_EXTRA_SRV_OPTS after stable/juno branch is cut
+if [[ -n "$Q_AGENT_EXTRA_SRV_OPTS" ]]; then
+    echo ""
+    echo_summary "WARNING: Q_AGENT_EXTRA_SRV_OPTS is used"
+    echo "You are using Q_AGENT_EXTRA_SRV_OPTS to pass configuration into $NEUTRON_CONF."
+    echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+    echo "Q_AGENT_EXTRA_AGENT_OPTS will be removed early in the 'K' development cycle"
+    echo "
+[[post-config|/\$Q_PLUGIN_CONF_FILE]]
+[linuxbridge]   # or [ovs]
+"
+    for I in "${Q_AGENT_EXTRA_SRV_OPTS[@]}"; do
+        # Print the option as it should appear in local.conf
+        echo ${I}
+    done
+fi
+
+# TODO(dtroyer): Remove Q_DHCP_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
 if [[ -n "$Q_DHCP_EXTRA_DEFAULT_OPTS" ]]; then
     echo ""
     echo_summary "WARNING: Q_DHCP_EXTRA_DEFAULT_OPTS is used"
     echo "You are using Q_DHCP_EXTRA_DEFAULT_OPTS to pass configuration into $Q_DHCP_CONF_FILE."
     echo "Please convert that configuration in localrc to a $Q_DHCP_CONF_FILE section in local.conf:"
+    echo "Q_DHCP_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
     echo "
-[[post-config|\$Q_DHCP_CONF_FILE]]
+[[post-config|/\$Q_DHCP_CONF_FILE]]
 [DEFAULT]
 "
     for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
@@ -1401,11 +1432,13 @@
     done
 fi
 
+# TODO(dtroyer): Remove Q_SRV_EXTRA_DEFAULT_OPTS after stable/icehouse branch is cut
 if [[ -n "$Q_SRV_EXTRA_DEFAULT_OPTS" ]]; then
     echo ""
     echo_summary "WARNING: Q_SRV_EXTRA_DEFAULT_OPTS is used"
     echo "You are using Q_SRV_EXTRA_DEFAULT_OPTS to pass configuration into $NEUTRON_CONF."
     echo "Please convert that configuration in localrc to a $NEUTRON_CONF section in local.conf:"
+    echo "Q_SRV_EXTRA_DEFAULT_OPTS will be removed early in the Juno development cycle"
     echo "
 [[post-config|\$NEUTRON_CONF]]
 [DEFAULT]
diff --git a/stackrc b/stackrc
index 6bb6f37..4418be1 100644
--- a/stackrc
+++ b/stackrc
@@ -213,7 +213,7 @@
 # storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
-SWIFT3_REPO=${SWIFT3_REPO:-http://github.com/fujita/swift3.git}
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
 SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
 
 # python swift client library
@@ -226,8 +226,8 @@
 
 
 # diskimage-builder
-BM_IMAGE_BUILD_REPO=${BM_IMAGE_BUILD_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-BM_IMAGE_BUILD_BRANCH=${BM_IMAGE_BUILD_BRANCH:-master}
+DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
+DIB_BRANCH=${DIB_BRANCH:-master}
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/kanaka/noVNC.git}
@@ -267,7 +267,7 @@
 is_package_installed xenserver-core && DEFAULT_VIRT_DRIVER=xenserver
 VIRT_DRIVER=${VIRT_DRIVER:-$DEFAULT_VIRT_DRIVER}
 case "$VIRT_DRIVER" in
-    libvirt)
+    ironic|libvirt)
         LIBVIRT_TYPE=${LIBVIRT_TYPE:-kvm}
         if [[ "$os_VENDOR" =~ (Debian) ]]; then
             LIBVIRT_GROUP=libvirt
@@ -320,9 +320,6 @@
     openvz)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
-    docker)
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros}
-        IMAGE_URLS=${IMAGE_URLS:-};;
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
@@ -335,7 +332,7 @@
         ;;
     vsphere)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
-        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
+        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.0-i386-disk.vmdk"};;
     xenserver)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
         IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};;
diff --git a/tools/docker/README.md b/tools/docker/README.md
deleted file mode 100644
index 976111f..0000000
--- a/tools/docker/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# DevStack on Docker
-
-Using Docker as Nova's hypervisor requries two steps:
-
-* Configure DevStack by adding the following to `localrc`::
-
-    VIRT_DRIVER=docker
-
-* Download and install the Docker service and images::
-
-    tools/docker/install_docker.sh
-
-After this, `stack.sh` should run as normal.
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
deleted file mode 100755
index 27c8c82..0000000
--- a/tools/docker/install_docker.sh
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env bash
-
-# **install_docker.sh** - Do the initial Docker installation and configuration
-
-# install_docker.sh
-#
-# Install docker package and images
-# * downloads a base busybox image and a glance registry image if necessary
-# * install the images in Docker's image cache
-
-
-# Keep track of the current directory
-SCRIPT_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Load local configuration
-source $TOP_DIR/stackrc
-
-FILES=$TOP_DIR/files
-
-# Get our defaults
-source $TOP_DIR/lib/nova_plugins/hypervisor-docker
-
-SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
-
-
-# Install Docker Service
-# ======================
-
-if is_fedora; then
-    install_package docker-io socat
-else
-    # Stop the auto-repo updates and do it when required here
-    NO_UPDATE_REPOS=True
-
-    # Set up home repo
-    curl https://get.docker.io/gpg | sudo apt-key add -
-    install_package python-software-properties && \
-        sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
-    apt_get update
-    install_package --force-yes lxc-docker socat
-fi
-
-# Start the daemon - restart just in case the package ever auto-starts...
-restart_service docker
-
-echo "Waiting for docker daemon to start..."
-DOCKER_GROUP=$(groups | cut -d' ' -f1)
-CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET 2>/dev/null | grep -q '200 OK'; do
-    # Set the right group on docker unix socket before retrying
-    sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET
-    sudo chmod g+rw $DOCKER_UNIX_SOCKET
-    sleep 1
-done"
-if ! timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then
-    die $LINENO "docker did not start"
-fi
-
-# Get guest container image
-docker pull $DOCKER_IMAGE
-docker tag $DOCKER_IMAGE $DOCKER_IMAGE_NAME
-
-# Get docker-registry image
-docker pull $DOCKER_REGISTRY_IMAGE
-docker tag $DOCKER_REGISTRY_IMAGE $DOCKER_REGISTRY_IMAGE_NAME
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 9fa161e..1eb9e7a 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -71,12 +71,13 @@
 }
 
 function install_pip_tarball {
-    (cd $FILES; \
-        curl -O $PIP_TAR_URL; \
-        tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null; \
-        cd pip-$INSTALL_PIP_VERSION; \
-        sudo -E python setup.py install 1>/dev/null; \
-    )
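+    # Download and unpack the pip tarball only if it is not already cached in $FILES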
+    if [[ ! -r $FILES/pip-$INSTALL_PIP_VERSION.tar.gz ]]; then
+        (cd $FILES; \
+            curl -O $PIP_TAR_URL; \
+            tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz 1>/dev/null)
+    fi
+    (cd $FILES/pip-$INSTALL_PIP_VERSION; \
+        sudo -E python setup.py install 1>/dev/null)
 }
 
 # Show starting versions
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 0c65fd9..9651083 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -55,7 +55,13 @@
 # ================
 
 # Install package requirements
-install_package $(get_packages general $ENABLED_SERVICES)
+PACKAGES=$(get_packages general $ENABLED_SERVICES)
+if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
+    # ensure headers for the running kernel are installed for any DKMS builds
+    PACKAGES="$PACKAGES linux-headers-$(uname -r)"
+fi
+
+install_package $PACKAGES
 
 if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
     if is_ubuntu || is_fedora; then
diff --git a/tools/ironic/scripts/cleanup-nodes b/tools/ironic/scripts/cleanup-nodes
new file mode 100755
index 0000000..adeca5c
--- /dev/null
+++ b/tools/ironic/scripts/cleanup-nodes
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# **cleanup-nodes**
+
+# Cleans up baremetal poseur nodes and volumes created during ironic setup
+# Assumes calling user has proper libvirt group membership and access.
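+#
+# Usage: cleanup-nodes <vm_count> <network_bridge>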
+
+set -exu
+
+LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
+VM_COUNT=$1
+NETWORK_BRIDGE=$2
+
+export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
+
+for (( idx=0; idx<$VM_COUNT; idx++ )); do
+    NAME="baremetal${NETWORK_BRIDGE}_${idx}"
+    VOL_NAME="baremetal${NETWORK_BRIDGE}-${idx}.qcow2"
+    virsh list | grep -q $NAME && virsh destroy $NAME
+    virsh list --inactive | grep -q $NAME && virsh undefine $NAME
+
+    if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then
+        virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME &&
+            virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL
+    fi
+done
diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm
new file mode 100755
index 0000000..9936b76
--- /dev/null
+++ b/tools/ironic/scripts/configure-vm
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+
+import argparse
+import os.path
+
+import libvirt
+
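+# The vm.xml template lives in ../templates relative to this script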
+templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)),
+                           'templates')
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Configure a kvm virtual machine for the seed image.")
+    parser.add_argument('--name', default='seed',
+                        help='the name to give the machine in libvirt.')
+    parser.add_argument('--image',
+                        help='Use a custom image file (must be qcow2).')
+    parser.add_argument('--engine', default='qemu',
+                        help='The virtualization engine to use')
+    parser.add_argument('--arch', default='i686',
+                        help='The architecture to use')
+    parser.add_argument('--memory', default='2097152',
+                        help="Maximum memory for the VM in KB.")
+    parser.add_argument('--cpus', default='1',
+                        help="CPU count for the VM.")
+    parser.add_argument('--bootdev', default='hd',
+                        help="What boot device to use (hd/network).")
+    parser.add_argument('--network', default="brbm",
+                        help='The libvirt network name to use')
+    parser.add_argument('--libvirt-nic-driver', default='e1000',
+                        help='The libvirt network driver to use')
+    parser.add_argument('--emulator', default=None,
+                        help='Path to emulator bin for vm template')
+    args = parser.parse_args()
+    with file(templatedir + '/vm.xml', 'rb') as f:
+        source_template = f.read()
+    params = {
+        'name': args.name,
+        'imagefile': args.image,
+        'engine': args.engine,
+        'arch': args.arch,
+        'memory': args.memory,
+        'cpus': args.cpus,
+        'bootdev': args.bootdev,
+        'network': args.network,
+        'emulator': args.emulator,
+    }
+
+    if args.emulator:
+        params['emulator'] = args.emulator
+    else:
+        if os.path.exists("/usr/bin/kvm"):  # Debian
+            params['emulator'] = "/usr/bin/kvm"
+        elif os.path.exists("/usr/bin/qemu-kvm"):  # Redhat
+            params['emulator'] = "/usr/bin/qemu-kvm"
+
+    nicparams = {
+        'nicdriver': args.libvirt_nic_driver,
+        'network': args.network,
+    }
+
+    params['bm_network'] = """
+<!-- neutron friendly 'bare metal' network -->
+<interface type='network'>
+  <source network='%(network)s'/>
+  <virtualport type='openvswitch'/>
+  <model type='%(nicdriver)s'/>
+  <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
+</interface>""" % nicparams
+
+    libvirt_template = source_template % params
+    conn = libvirt.open("qemu:///system")
+    a = conn.defineXML(libvirt_template)
+    print ("Created machine %s with UUID %s" % (args.name, a.UUIDString()))
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/ironic/scripts/create-nodes b/tools/ironic/scripts/create-nodes
new file mode 100755
index 0000000..d81113a
--- /dev/null
+++ b/tools/ironic/scripts/create-nodes
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+
+# **create-nodes**
+
+# Creates baremetal poseur nodes for ironic testing purposes
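+#
+# Usage: create-nodes <cpus> <memory_mb> <disk_gb> <arch> <node_count> <bridge> <emulator>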
+
+set -exu
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+
+CPU=$1
+MEM=$(( 1024 * $2 ))
+# Extra GB to allow fuzz for the partition table: the flavor size and the
+# registered size need to be different from the actual size.
+DISK=$(( $3 + 1))
+
+case $4 in
+    i386) ARCH='i686' ;;
+    amd64) ARCH='x86_64' ;;
+    *) echo "Unsupported arch $4!" ; exit 1 ;;
+esac
+
+TOTAL=$(($5 - 1))
+BRIDGE=$6
+EMULATOR=$7
+
+LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"}
+LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
+export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
+
+if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
+    virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2
+    virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2
+    virsh pool-start $LIBVIRT_STORAGE_POOL >&2
+fi
+
+pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }')
+if [ "$pool_state" != "running" ] ; then
+    [ ! -d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images
+    virsh pool-start $LIBVIRT_STORAGE_POOL >&2
+fi
+
+PREALLOC=
+if [ -f /etc/debian_version ]; then
+    PREALLOC="--prealloc-metadata"
+fi
+
+DOMS=""
+for idx in $(seq 0 $TOTAL) ; do
+    NAME="baremetal${BRIDGE}_${idx}"
+    DOMS="$DOMS $NAME"
+    VOL_NAME="baremetal${BRIDGE}-${idx}.qcow2"
+    (virsh list --all | grep -q $NAME) && continue
+
+    virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME &&
+        virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2
+    virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2
+    volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME)
+    # Pre-touch the volume file to set +C (disable copy-on-write), as it can only be set on empty files.
+    sudo touch "$volume_path"
+    sudo chattr +C "$volume_path" || true
+    $TOP_DIR/scripts/configure-vm --bootdev network --name $NAME --image "$volume_path" --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER --emulator $EMULATOR --network $BRIDGE >&2
+done
+
+for dom in $DOMS ; do
+    # echo mac
+    virsh dumpxml $dom | grep "mac address" | head -1 | cut -d\' -f2
+done
diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network
new file mode 100755
index 0000000..e326bf8
--- /dev/null
+++ b/tools/ironic/scripts/setup-network
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# **setup-network**
+
+# Sets up an Open vSwitch libvirt network suitable for
+# running baremetal poseur nodes for ironic testing purposes
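+#
+# Usage: setup-network [bridge-suffix]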
+
+set -exu
+
+LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+BRIDGE_SUFFIX=${1:-''}
+BRIDGE_NAME=brbm$BRIDGE_SUFFIX
+
+export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI"
+
+# Only add bridge if missing
+(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
+
+# Remove the existing libvirt network before replacing it.
+(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME}
+(virsh net-list --inactive  | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME}
+
+virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml)
+virsh net-autostart ${BRIDGE_NAME}
+virsh net-start ${BRIDGE_NAME}
diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml
new file mode 100644
index 0000000..0769d3f
--- /dev/null
+++ b/tools/ironic/templates/brbm.xml
@@ -0,0 +1,6 @@
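+<!-- libvirt network definition bridged to the Open vSwitch bare metal bridge;
+     setup-network rewrites the bridge name when a bridge suffix is given -->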
+<network>
+  <name>brbm</name>
+  <forward mode='bridge'/>
+  <bridge name='brbm'/>
+  <virtualport type='openvswitch'/>
+</network>
diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template
new file mode 100644
index 0000000..7b9b0f8
--- /dev/null
+++ b/tools/ironic/templates/tftpd-xinetd.template
@@ -0,0 +1,11 @@
+service tftp
+{
+  protocol        = udp
+  port            = 69
+  socket_type     = dgram
+  wait            = yes
+  user            = root
+  server          = /usr/sbin/in.tftpd
+  server_args     = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR%
+  disable         = no
+}
diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml
new file mode 100644
index 0000000..b18dec0
--- /dev/null
+++ b/tools/ironic/templates/vm.xml
@@ -0,0 +1,43 @@
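+<!-- Domain template consumed by tools/ironic/scripts/configure-vm, which fills
+     in the placeholders via Python string formatting -->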
+<domain type='%(engine)s'>
+  <name>%(name)s</name>
+  <memory unit='KiB'>%(memory)s</memory>
+  <vcpu>%(cpus)s</vcpu>
+  <os>
+    <type arch='%(arch)s' machine='pc-1.0'>hvm</type>
+    <boot dev='%(bootdev)s'/>
+    <bootmenu enable='no'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>%(emulator)s</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2' cache='writeback'/>
+      <source file='%(imagefile)s'/>
+      <target dev='vda' bus='virtio'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
+    </disk>
+    <controller type='ide' index='0'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
+    </controller>
+    %(network)s
+    %(bm_network)s
+    <input type='mouse' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes'/>
+    <video>
+      <model type='cirrus' vram='9216' heads='1'/>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
+    </video>
+    <memballoon model='virtio'>
+      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
+    </memballoon>
+  </devices>
+</domain>
+
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
index ee3790f..8be500b 100755
--- a/tools/jenkins/jenkins_home/print_summary.py
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -18,8 +18,8 @@
 
 
 def print_usage():
-    print ("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
-           % sys.argv[0])
+    print("Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"
+          % sys.argv[0])
     sys.exit()