Merge pull request #170 from cloudbuilders/ci-tests

Add CI tests
diff --git a/exercise.sh b/exercise.sh
index cca9a13..7703f40 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -1,214 +1,46 @@
 #!/usr/bin/env bash
 
-# **exercise.sh** - using the cloud can be fun
+# Run everything in the exercises/ directory that isn't explicitly disabled
 
-# we will use the ``nova`` cli tool provided by the ``python-novaclient``
-# package
-#
+# comma separated list of script basenames to skip
+# to refrain from exercising euca.sh use SKIP_EXERCISES=euca
+SKIP_EXERCISES=${SKIP_EXERCISES:-""}
 
+# Locate the scripts we should run
+EXERCISE_DIR=$(dirname "$0")/exercises
+basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
 
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occured.
-set -o errexit
+# Track the state of each script
+passes=""
+failures=""
+skips=""
 
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Use openrc + stackrc + localrc for settings
-source ./openrc
-
-# Get a token for clients that don't support service catalog
-# ==========================================================
-
-# manually create a token by querying keystone (sending JSON data).  Keystone
-# returns a token and catalog of endpoints.  We use python to parse the token
-# and save it.
-
-TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
-
-# Launching a server
-# ==================
-
-# List servers for tenant:
-nova list
-
-# Images
-# ------
-
-# Nova has a **deprecated** way of listing images.
-nova image-list
-
-# But we recommend using glance directly
-glance -A $TOKEN index
-
-# Let's grab the id of the first AMI image to launch
-IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
-
-# Security Groups
-# ---------------
-SECGROUP=test_secgroup
-
-# List of secgroups:
-nova secgroup-list
-
-# Create a secgroup
-nova secgroup-create $SECGROUP "test_secgroup description"
-
-# determine flavor
-# ----------------
-
-# List of flavors:
-nova flavor-list
-
-# and grab the first flavor in the list to launch
-FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
-
-NAME="myserver"
-
-nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
-
-# Testing
-# =======
-
-# First check if it spins up (becomes active and responds to ping on
-# internal ip).  If you run this script from a nova node, you should
-# bypass security groups and have direct access to the server.
-
-# Waiting for boot
-# ----------------
-
-# Max time to wait while vm goes from build to active state
-ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
-
-# Max time till the vm is bootable
-BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
-
-# Max time to wait for proper association and dis-association.
-ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
-
-# check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
-fi
-
-# get the IP of the server
-IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
-
-# for single node deployments, we can ping private ips
-MULTI_HOST=${MULTI_HOST:-0}
-if [ "$MULTI_HOST" = "0" ]; then
-    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
-    # network to respond?), so let's ping for a default of 15 seconds with a
-    # timeout of a second for each ping.
-    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
-        echo "Couldn't ping server"
-        exit 1
+# Loop over each possible script (by basename)
+for script in $basenames; do
+    if [[ "$SKIP_EXERCISES" =~ $script ]] ; then
+        skips="$skips $script"
+    else
+        echo =========================
+        echo Running $script
+        echo =========================
+        $EXERCISE_DIR/$script.sh
+        if [[ $? -ne 0 ]] ; then
+            failures="$failures $script"
+        else
+            passes="$passes $script"
+        fi
     fi
-else
-    # On a multi-host system, without vm net access, do a sleep to wait for the boot
-    sleep $BOOT_TIMEOUT
-fi
+done
 
-# Security Groups & Floating IPs
-# ------------------------------
-
-# allow icmp traffic (ping)
-nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-
-# List rules for a secgroup
-nova secgroup-list-rules $SECGROUP
-
-# allocate a floating ip
-nova floating-ip-create
-
-# store  floating address
-FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
-
-# add floating ip to our server
-nova add-floating-ip $NAME $FLOATING_IP
-
-# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
-    echo "Couldn't ping server with floating ip"
-    exit 1
-fi
-
-# pause the VM and verify we can't ping it anymore
-nova pause $NAME
-
-sleep 2
-
-if ( ping -c1 -w1 $IP); then
-    echo "Pause failure - ping shouldn't work"
-    exit 1
-fi
-
-if ( ping -c1 -w1 $FLOATING_IP); then
-    echo "Pause failure - ping floating ips shouldn't work"
-    exit 1
-fi
-
-# unpause the VM and verify we can ping it again
-nova unpause $NAME
-
-sleep 2
-
-ping -c1 -w1 $IP
-
-# dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-
-# FIXME (anthony): make xs support security groups
-if [ "$VIRT_DRIVER" != "xenserver" ]; then
-    # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
-        print "Security group failure - ping should not be allowed!"
-        echo "Couldn't ping server with floating ip"
-        exit 1
-    fi
-fi
-
-# de-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP
-
-# shutdown the server
-nova delete $NAME
-
-# Delete a secgroup
-nova secgroup-delete $SECGROUP
-
-# FIXME: validate shutdown within 5 seconds
-# (nova show $NAME returns 1 or status != ACTIVE)?
-
-# Testing Euca2ools
-# ==================
-
-# make sure that we can describe instances
-euca-describe-instances
-
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
-    # Testing Swift
-    # =============
-
-    # Check if we have to swift via keystone
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
-
-    # We start by creating a test container
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
-
-    # add some files into it.
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
-
-    # list them
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer 
-
-    # And we may want to delete them now that we have tested that
-    # everything works.
-    swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
-fi
+# output status of exercise run
+echo =========================
+echo =========================
+for script in $skips; do
+    echo SKIP $script
+done
+for script in $passes; do
+    echo PASS $script
+done
+for script in $failures; do
+    echo FAILED $script
+done
diff --git a/exercises/euca.sh b/exercises/euca.sh
new file mode 100755
index 0000000..9605ace
--- /dev/null
+++ b/exercises/euca.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# we will use the ``euca2ools`` cli tool that wraps the python boto 
+# library to test ec2 compatibility
+#
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+# find a machine image to boot
+IMAGE=`euca-describe-images | grep machine | cut -f2`
+
+# launch it
+INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`
+
+# assure it has booted within a reasonable time
+if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
+    echo "server didn't become active within $RUNNING_TIMEOUT seconds"
+    exit 1
+fi
+
+euca-terminate-instances $INSTANCE
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
new file mode 100755
index 0000000..75046d1
--- /dev/null
+++ b/exercises/floating_ips.sh
@@ -0,0 +1,190 @@
+#!/usr/bin/env bash
+
+# **exercise.sh** - using the cloud can be fun
+
+# we will use the ``nova`` cli tool provided by the ``python-novaclient``
+# package
+#
+
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+# Get a token for clients that don't support service catalog
+# ==========================================================
+
+# manually create a token by querying keystone (sending JSON data).  Keystone
+# returns a token and catalog of endpoints.  We use python to parse the token
+# and save it.
+
+TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_API_KEY\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
+
+# Launching a server
+# ==================
+
+# List servers for tenant:
+nova list
+
+# Images
+# ------
+
+# Nova has a **deprecated** way of listing images.
+nova image-list
+
+# But we recommend using glance directly
+glance -A $TOKEN index
+
+# Let's grab the id of the first AMI image to launch
+IMAGE=`glance -A $TOKEN index | egrep ami | cut -d" " -f1`
+
+# Security Groups
+# ---------------
+SECGROUP=test_secgroup
+
+# List of secgroups:
+nova secgroup-list
+
+# Create a secgroup
+nova secgroup-create $SECGROUP "test_secgroup description"
+
+# determine flavor
+# ----------------
+
+# List of flavors:
+nova flavor-list
+
+# and grab the first flavor in the list to launch
+FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+
+NAME="myserver"
+
+nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
+
+# Testing
+# =======
+
+# First check if it spins up (becomes active and responds to ping on
+# internal ip).  If you run this script from a nova node, you should
+# bypass security groups and have direct access to the server.
+
+# Waiting for boot
+# ----------------
+
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
+
+# check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $NAME | grep status | grep -q ACTIVE; do sleep 1; done"; then
+    echo "server didn't become active!"
+    exit 1
+fi
+
+# get the IP of the server
+IP=`nova show $NAME | grep "private network" | cut -d"|" -f3`
+
+# for single node deployments, we can ping private ips
+MULTI_HOST=${MULTI_HOST:-0}
+if [ "$MULTI_HOST" = "0" ]; then
+    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
+    # network to respond?), so let's ping for a default of 15 seconds with a
+    # timeout of a second for each ping.
+    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
+        echo "Couldn't ping server"
+        exit 1
+    fi
+else
+    # On a multi-host system, without vm net access, do a sleep to wait for the boot
+    sleep $BOOT_TIMEOUT
+fi
+
+# Security Groups & Floating IPs
+# ------------------------------
+
+# allow icmp traffic (ping)
+nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+
+# List rules for a secgroup
+nova secgroup-list-rules $SECGROUP
+
+# allocate a floating ip
+nova floating-ip-create
+
+# store  floating address
+FLOATING_IP=`nova floating-ip-list | grep None | head -1 | cut -d '|' -f2 | sed 's/ //g'`
+
+# add floating ip to our server
+nova add-floating-ip $NAME $FLOATING_IP
+
+# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+    echo "Couldn't ping server with floating ip"
+    exit 1
+fi
+
+# pause the VM and verify we can't ping it anymore
+nova pause $NAME
+
+sleep 2
+
+if ( ping -c1 -w1 $IP); then
+    echo "Pause failure - ping shouldn't work"
+    exit 1
+fi
+
+if ( ping -c1 -w1 $FLOATING_IP); then
+    echo "Pause failure - ping floating ips shouldn't work"
+    exit 1
+fi
+
+# unpause the VM and verify we can ping it again
+nova unpause $NAME
+
+sleep 2
+
+ping -c1 -w1 $IP
+
+# dis-allow icmp traffic (ping)
+nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+
+# FIXME (anthony): make xs support security groups
+if [ "$VIRT_DRIVER" != "xenserver" ]; then
+    # test that we aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
+    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+        print "Security group failure - ping should not be allowed!"
+        echo "Couldn't ping server with floating ip"
+        exit 1
+    fi
+fi
+
+# de-allocate the floating ip
+nova floating-ip-delete $FLOATING_IP
+
+# shutdown the server
+nova delete $NAME
+
+# Delete a secgroup
+nova secgroup-delete $SECGROUP
+
+# FIXME: validate shutdown within 5 seconds
+# (nova show $NAME returns 1 or status != ACTIVE)?
+
diff --git a/exercises/swift.sh b/exercises/swift.sh
new file mode 100755
index 0000000..f7be099
--- /dev/null
+++ b/exercises/swift.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+# Test swift via the command line tools that ship with it.
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+
+# Testing Swift
+# =============
+
+# Check that we have access to swift via keystone
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD stat
+
+# We start by creating a test container
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD post testcontainer
+
+# add some files into it.
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD upload testcontainer /etc/issue
+
+# list them
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD list testcontainer
+
+# And we may want to delete them now that we have tested that
+# everything works.
+swift --auth-version 2 -A http://${HOST_IP}:5000/v2.0 -U admin -K $ADMIN_PASSWORD delete testcontainer
diff --git a/files/apts/nova b/files/apts/nova
index 405d53b..77622a8 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -1,4 +1,5 @@
 dnsmasq-base
+dnsmasq-utils # for dhcp_release
 kpartx
 parted
 arping # used for send_arp_for_ha option in nova-network
@@ -38,4 +39,5 @@
 
 # Stuff for diablo volumes
 iscsitarget
+iscsitarget-dkms
 lvm2
diff --git a/files/apts/preseed b/files/apts/preseed
deleted file mode 100644
index 8712d5d..0000000
--- a/files/apts/preseed
+++ /dev/null
@@ -1,18 +0,0 @@
-# a collection of packages that speed up installation as they are dependencies
-# of packages we can't install during bootstraping (rabbitmq-server, 
-# mysql-server, libvirt-bin)
-#
-# NOTE: only add packages to this file that aren't needed directly
-mysql-common
-mysql-client-5.1
-erlang-base
-erlang-ssl 
-erlang-nox
-erlang-inets
-erlang-mnesia
-libhtml-template-perl
-gettext-base
-libavahi-client3
-libxml2-utils
-libpciaccess0
-libparted0debian1
diff --git a/files/glance-api.conf b/files/glance-api.conf
index 3499ff7..bb758af 100644
--- a/files/glance-api.conf
+++ b/files/glance-api.conf
@@ -24,7 +24,7 @@
 
 # Log to this file. Make sure you do not set the same log
 # file for both the API and registry servers!
-log_file = %DEST%/glance/api.log
+#log_file = %DEST%/glance/api.log
 
 # Send logs to syslog (/dev/log) instead of to file specified by `log_file`
 use_syslog = %SYSLOG%
diff --git a/files/glance-registry.conf b/files/glance-registry.conf
index 351b09f..1e04186 100644
--- a/files/glance-registry.conf
+++ b/files/glance-registry.conf
@@ -13,7 +13,7 @@
 
 # Log to this file. Make sure you do not set the same log
 # file for both the API and registry servers!
-log_file = %DEST%/glance/registry.log
+#log_file = %DEST%/glance/registry.log
 
 # Where to store images
 filesystem_store_datadir = %DEST%/glance/images
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index fe7e39b..2db6d32 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -16,6 +16,7 @@
 use = egg:swiftkeystone2#keystone2
 keystone_admin_token = %SERVICE_TOKEN%
 keystone_url = http://localhost:35357/v2.0
+keystone_admin_group = Member
 
 [filter:tempauth]
 use = egg:swift#tempauth
diff --git a/openrc b/openrc
index 324780b..4b36112 100644
--- a/openrc
+++ b/openrc
@@ -49,3 +49,14 @@
 # set log level to DEBUG (helps debug issues)
 # export NOVACLIENT_DEBUG=1
 
+# Max time till the vm is bootable
+export BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait while vm goes from build to active state
+export ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-10}
+
+# Max time from run instance command until it is running
+export RUNNING_TIMEOUT=${RUNNING_TIMEOUT:-$(($BOOT_TIMEOUT + $ACTIVE_TIMEOUT))}
+
+# Max time to wait for proper IP association and dis-association.
+export ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
diff --git a/stack.sh b/stack.sh
index 841cbb4..4461e40 100755
--- a/stack.sh
+++ b/stack.sh
@@ -159,6 +159,9 @@
 # Specify which services to launch.  These generally correspond to screen tabs
 ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit}
 
+# Name of the lvm volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
+
 # Nova hypervisor configuration.  We default to libvirt whth  **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  Stack.sh can
 # also install an **LXC** based system.
@@ -368,7 +371,7 @@
 apt_get install `cat $FILES/apts/* | cut -d\# -f1 | grep -Ev "mysql-server|rabbitmq-server|memcached"`
 
 # install python requirements
-sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install `cat $FILES/pips/*`
+sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*`
 
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
 # be owned by the installation user, we create the directory and change the
@@ -394,7 +397,7 @@
         # remove the existing ignored files (like pyc) as they cause breakage
         # (due to the py files having older timestamps than our pyc, so python
         # thinks the pyc files are correct using them)
-        sudo git clean -f -d
+        find $GIT_DEST -name '*.pyc' -delete
         git checkout -f origin/$GIT_BRANCH
         # a local branch might not exist
         git branch -D $GIT_BRANCH || true
@@ -691,7 +694,7 @@
 
    # swift-init has a bug using /etc/swift until bug #885595 is fixed
    # we have to create a link
-   sudo ln -s ${SWIFT_CONFIG_LOCATION} /etc/swift
+   sudo ln -sf ${SWIFT_CONFIG_LOCATION} /etc/swift
 
    # Swift use rsync to syncronize between all the different
    # partitions (which make more sense when you have a multi-node
@@ -704,6 +707,11 @@
    # configured keystone it will checkout the directory.
    if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
        swift_auth_server=keystone
+
+       # We install the memcached server as it will be used by the
+       # middleware to cache token auths for as long as needed.
+       apt_get install memcached
+
        # We need a special version of bin/swift which understand the
        # OpenStack api 2.0, we download it until this is getting
        # integrated in swift.
@@ -778,12 +786,12 @@
     #
     # By default, the backing file is 2G in size, and is stored in /opt/stack.
     #
-    if ! sudo vgdisplay | grep -q nova-volumes; then
+    if ! sudo vgdisplay | grep -q $VOLUME_GROUP; then
         VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
         truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
         DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
-        sudo vgcreate nova-volumes $DEV
+        sudo vgcreate $VOLUME_GROUP $DEV
     fi
 
     # Configure iscsitarget
@@ -812,6 +820,9 @@
 else
     add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
 fi
+if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
+    add_nova_flag "--volume_group=$VOLUME_GROUP"
+fi
 add_nova_flag "--my_ip=$HOST_IP"
 add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
 add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
@@ -826,6 +837,7 @@
 add_nova_flag "--rabbit_host=$RABBIT_HOST"
 add_nova_flag "--rabbit_password=$RABBIT_PASSWORD"
 add_nova_flag "--glance_api_servers=$GLANCE_HOSTPORT"
+add_nova_flag "--force_dhcp_release"
 if [ -n "$INSTANCES_PATH" ]; then
     add_nova_flag "--instances_path=$INSTANCES_PATH"
 fi
diff --git a/stackrc b/stackrc
index ba98f15..854a44c 100644
--- a/stackrc
+++ b/stackrc
@@ -1,6 +1,6 @@
 # compute service
-NOVA_REPO=https://github.com/cloudbuilders/nova.git
-NOVA_BRANCH=diablo
+NOVA_REPO=https://github.com/openstack/nova.git
+NOVA_BRANCH=stable/diablo
 
 # storage service
 SWIFT_REPO=https://github.com/openstack/swift.git
@@ -11,12 +11,12 @@
 SWIFT_KEYSTONE_BRANCH=master
 
 # image catalog service
-GLANCE_REPO=https://github.com/cloudbuilders/glance.git
-GLANCE_BRANCH=diablo
+GLANCE_REPO=https://github.com/openstack/glance.git
+GLANCE_BRANCH=stable/diablo
 
 # unified auth system (manages accounts/tokens)
-KEYSTONE_REPO=https://github.com/cloudbuilders/keystone.git
-KEYSTONE_BRANCH=diablo
+KEYSTONE_REPO=https://github.com/openstack/keystone.git
+KEYSTONE_BRANCH=stable/diablo
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
diff --git a/tools/build_pxe_boot.sh b/tools/build_pxe_env.sh
similarity index 93%
rename from tools/build_pxe_boot.sh
rename to tools/build_pxe_env.sh
index ab64098..1ab51f8 100755
--- a/tools/build_pxe_boot.sh
+++ b/tools/build_pxe_env.sh
@@ -1,11 +1,14 @@
 #!/bin/bash -e
-# build_pxe_boot.sh - Create a PXE boot environment
+# build_pxe_env.sh - Create a PXE boot environment
 #
-# build_pxe_boot.sh destdir
+# build_pxe_env.sh destdir
 #
-# Assumes syslinux is installed
+# Requires Ubuntu Oneiric
+#
 # Only needs to run as root if the destdir permissions require it
 
+dpkg -l syslinux || apt-get install -y syslinux
+
 DEST_DIR=${1:-/tmp}/tftpboot
 PXEDIR=${PXEDIR:-/var/cache/devstack/pxe}
 OPWD=`pwd`
diff --git a/tools/build_uec.sh b/tools/build_uec.sh
index 6bab526..39c0d17 100755
--- a/tools/build_uec.sh
+++ b/tools/build_uec.sh
@@ -1,8 +1,8 @@
 #!/usr/bin/env bash
 
-# Make sure that we have the proper version of ubuntu (only works on natty/oneiric)
-if ! egrep -q "oneiric|natty" /etc/lsb-release; then
-    echo "This script only works with ubuntu oneiric and natty"
+# Make sure that we have the proper version of ubuntu (only works on oneiric)
+if ! egrep -q "oneiric" /etc/lsb-release; then
+    echo "This script only works with ubuntu oneiric."
     exit 1
 fi
 
@@ -33,8 +33,8 @@
 fi
 
 # Install deps if needed
-DEPS="kvm libvirt-bin kpartx"
-dpkg -l $DEPS || apt-get install -y --force-yes $DEPS
+DEPS="kvm libvirt-bin kpartx cloud-utils curl"
+apt-get install -y --force-yes $DEPS
 
 # Where to store files and instances
 WORK_DIR=${WORK_DIR:-/opt/kvmstack}
@@ -90,9 +90,10 @@
 
 # libvirt.xml configuration
 NET_XML=$vm_dir/net.xml
+NET_NAME=${NET_NAME:-devstack-$GUEST_NETWORK}
 cat > $NET_XML <<EOF
 <network>
-  <name>devstack-$GUEST_NETWORK</name>
+  <name>$NET_NAME</name>
   <bridge name="stackbr%d" />
   <forward/>
   <ip address="$GUEST_GATEWAY" netmask="$GUEST_NETMASK">
@@ -104,9 +105,9 @@
 EOF
 
 if [[ "$GUEST_RECREATE_NET" == "yes" ]]; then
-    virsh net-destroy devstack-$GUEST_NETWORK || true
+    virsh net-destroy $NET_NAME || true
     # destroying the network isn't enough to delete the leases
-    rm -f /var/lib/libvirt/dnsmasq/devstack-$GUEST_NETWORK.leases
+    rm -f /var/lib/libvirt/dnsmasq/$NET_NAME.leases
     virsh net-create $vm_dir/net.xml
 fi
 
@@ -134,7 +135,7 @@
     </disk>
 
     <interface type='network'>
-      <source network='devstack-$GUEST_NETWORK'/>
+      <source network='$NET_NAME'/>
     </interface>
         
     <!-- The order is significant here.  File must be defined first -->
@@ -170,7 +171,7 @@
 local-hostname: $GUEST_NAME.local
 EOF
 
-# set metadata
+# set user-data
 cat > $vm_dir/uec/user-data<<EOF
 #!/bin/bash
 # hostname needs to resolve for rabbit
@@ -186,6 +187,33 @@
 ROOTSLEEP=0
 `cat $TOP_DIR/localrc`
 LOCAL_EOF
+# Disable byobu
+/usr/bin/byobu-disable
+EOF
+
+# Setup stack user with our key
+CONFIGURE_STACK_USER=${CONFIGURE_STACK_USER:-yes}
+if [[ -e ~/.ssh/id_rsa.pub  && "$CONFIGURE_STACK_USER" = "yes" ]]; then
+    PUB_KEY=`cat  ~/.ssh/id_rsa.pub`
+    cat >> $vm_dir/uec/user-data<<EOF
+mkdir -p /opt/stack
+useradd -U -G sudo -s /bin/bash -d /opt/stack -m stack
+echo stack:pass | chpasswd
+mkdir -p /opt/stack/.ssh
+echo "$PUB_KEY" > /opt/stack/.ssh/authorized_keys
+chown -R stack /opt/stack
+chmod 700 /opt/stack/.ssh
+chmod 600 /opt/stack/.ssh/authorized_keys
+
+grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
+    echo "#includedir /etc/sudoers.d" >> /etc/sudoers
+( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \
+    > /etc/sudoers.d/50_stack_sh )
+EOF
+fi
+
+# Run stack.sh
+cat >> $vm_dir/uec/user-data<<EOF
 ./stack.sh
 EOF
 
diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh
index a3a2346..ff88a06 100644
--- a/tools/install_openvpn.sh
+++ b/tools/install_openvpn.sh
@@ -10,13 +10,20 @@
 # --server mode configures the host with a running OpenVPN server instance
 # --client mode creates a tarball of a client configuration for this server
 
+# Get config file
+if [ -e localrc.vpn ]; then
+    . localrc.vpn
+fi
+
 # VPN Config
 VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`}  # 50.56.12.212
 VPN_PROTO=${VPN_PROTO:-tcp}
 VPN_PORT=${VPN_PORT:-6081}
 VPN_DEV=${VPN_DEV:-tun}
+VPN_BRIDGE=${VPN_BRIDGE:-br0}
 VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0}
 VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0}
+VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-172.16.28.1 172.16.28.254}"
 VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0}
 VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0}
 
@@ -39,7 +46,8 @@
 fi
 
 # Install OpenVPN
-if [ ! -x `which openvpn` ]; then
+VPN_EXEC=`which openvpn`
+if [ -z "$VPN_EXEC" -o ! -x "$VPN_EXEC" ]; then
     apt-get install -y openvpn bridge-utils
 fi
 if [ ! -d $CA_DIR ]; then
@@ -73,21 +81,49 @@
     (cd $CA_DIR/keys;
         cp $NAME.crt $NAME.key ca.crt dh1024.pem ta.key $VPN_DIR
     )
+    cat >$VPN_DIR/br-up <<EOF
+#!/bin/bash
+
+BR="$VPN_BRIDGE"
+TAP="\$1"
+
+for t in \$TAP; do
+    openvpn --mktun --dev \$t
+    brctl addif \$BR \$t
+    ifconfig \$t 0.0.0.0 promisc up
+done
+EOF
+    chmod +x $VPN_DIR/br-up
+    cat >$VPN_DIR/br-down <<EOF
+#!/bin/bash
+
+BR="$VPN_BRIDGE"
+TAP="\$1"
+
+for i in \$TAP; do
+    brctl delif \$BR $t
+    openvpn --rmtun --dev \$i
+done
+EOF
+    chmod +x $VPN_DIR/br-down
     cat >$VPN_DIR/$NAME.conf <<EOF
 proto $VPN_PROTO
 port $VPN_PORT
 dev $VPN_DEV
+up $VPN_DIR/br-up
+down $VPN_DIR/br-down
 cert $NAME.crt
 key $NAME.key  # This file should be kept secret
 ca ca.crt
 dh dh1024.pem
 duplicate-cn
-server $VPN_CLIENT_NET $VPN_CLIENT_MASK
+#server $VPN_CLIENT_NET $VPN_CLIENT_MASK
+server-bridge $VPN_CLIENT_NET $VPN_CLIENT_MASK $VPN_CLIENT_DHCP
 ifconfig-pool-persist ipp.txt
 push "route $VPN_LOCAL_NET $VPN_LOCAL_MASK"
 comp-lzo
 user nobody
-group nobody
+group nogroup
 persist-key
 persist-tun
 status openvpn-status.log
@@ -121,7 +157,7 @@
 resolv-retry infinite
 nobind
 user nobody
-group nobody
+group nogroup
 persist-key
 persist-tun
 comp-lzo
diff --git a/tools/jenkins/README.md b/tools/jenkins/README.md
new file mode 100644
index 0000000..371017d
--- /dev/null
+++ b/tools/jenkins/README.md
@@ -0,0 +1,38 @@
+Getting Started With Jenkins and Devstack
+=========================================
+This little corner of devstack is to show how to get an Openstack jenkins
+environment up and running quickly, using the rcb configuration methodology.
+
+
+To create a jenkins server
+--------------------------
+
+    cd tools/jenkins/jenkins_home
+    ./build_jenkins.sh
+
+This will create a jenkins environment configured with sample test scripts that run against xen and kvm.
+
+Configuring XS
+--------------
+In order to make the tests for XS work, you must install xs 5.6 on a separate machine,
+and install the the jenkins public key on that server.  You then need to create the
+/var/lib/jenkins/xenrc on your jenkins server like so:
+
+    MYSQL_PASSWORD=secrete
+    SERVICE_TOKEN=secrete
+    ADMIN_PASSWORD=secrete
+    RABBIT_PASSWORD=secrete
+    # This is the password for your guest (for both stack and root users)
+    GUEST_PASSWORD=secrete
+    # Do not download the usual images yet!
+    IMAGE_URLS=""
+    FLOATING_RANGE=192.168.1.224/28
+    VIRT_DRIVER=xenserver
+    # Explicitly set multi-host
+    MULTI_HOST=1
+    # Give extra time for boot
+    ACTIVE_TIMEOUT=45
+    #  IMPORTANT: This is the ip of your xenserver
+    XEN_IP=10.5.5.1
+    # IMPORTANT: The following must be set to your dom0 root password!
+    XENAPI_PASSWORD='MY_XEN_ROOT_PW'
diff --git a/tools/jenkins/adapters/euca.sh b/tools/jenkins/adapters/euca.sh
new file mode 100755
index 0000000..b49ce9f
--- /dev/null
+++ b/tools/jenkins/adapters/euca.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+TOP_DIR=$(cd ../../.. && pwd)
+HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./euca.sh'
diff --git a/tools/jenkins/adapters/floating_ips.sh b/tools/jenkins/adapters/floating_ips.sh
new file mode 100755
index 0000000..a97f935
--- /dev/null
+++ b/tools/jenkins/adapters/floating_ips.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+TOP_DIR=$(cd ../../.. && pwd)
+HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./floating_ips.sh'
diff --git a/tools/jenkins/build_configuration.sh b/tools/jenkins/build_configuration.sh
new file mode 100755
index 0000000..e295ef2
--- /dev/null
+++ b/tools/jenkins/build_configuration.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+EXECUTOR_NUMBER=$1
+CONFIGURATION=$2
+ADAPTER=$3
+RC=$4
+
+function usage() {
+    echo "Usage: $0 -  Build a configuration"
+    echo ""
+    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
+    exit 1
+}
+
+# Validate inputs
+if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = ""  || "$ADAPTER" = "" ]]; then
+    usage
+fi
+
+# Execute configuration script
+cd configurations && ./$CONFIGURATION.sh $EXECUTOR_NUMBER $CONFIGURATION $ADAPTER "$RC"
diff --git a/tools/jenkins/configurations/kvm.sh b/tools/jenkins/configurations/kvm.sh
new file mode 100755
index 0000000..5a9df47
--- /dev/null
+++ b/tools/jenkins/configurations/kvm.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+EXECUTOR_NUMBER=$1
+CONFIGURATION=$2
+ADAPTER=$3
+RC=$4
+
+function usage() {
+    echo "Usage: $0 - Build a test configuration"
+    echo ""
+    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
+    exit 1
+}
+
+# Validate inputs
+if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = ""  || "$ADAPTER" = "" ]]; then
+    usage
+fi
+
+# This directory
+CUR_DIR=$(cd $(dirname "$0") && pwd)
+
+# devstack directory
+cd ../../..
+TOP_DIR=$(pwd)
+
+# Deps
+apt-get install -y --force-yes libvirt-bin
+
+# Name test instance based on executor
+BASE_NAME=executor-`printf "%02d" $EXECUTOR_NUMBER`
+GUEST_NAME=$BASE_NAME.$ADAPTER
+virsh list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh destroy || true
+virsh net-list | grep $BASE_NAME | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true
+
+# Configure localrc
+cat <<EOF >localrc
+RECLONE=yes
+GUEST_NETWORK=$EXECUTOR_NUMBER
+GUEST_NAME=$GUEST_NAME
+FLOATING_RANGE=192.168.$EXECUTOR_NUMBER.128/27
+GUEST_CORES=1
+GUEST_RAM=12574720
+MYSQL_PASSWORD=chicken
+RABBIT_PASSWORD=chicken
+SERVICE_TOKEN=chicken
+ADMIN_PASSWORD=chicken
+USERNAME=admin
+TENANT=admin
+NET_NAME=$BASE_NAME
+ACTIVE_TIMEOUT=45
+BOOT_TIMEOUT=45
+$RC
+EOF
+cd tools
+sudo ./build_uec.sh
+
+# Make the address of the instances available to test runners
+echo HEAD=`cat /var/lib/libvirt/dnsmasq/$BASE_NAME.leases | cut -d " " -f3` > $TOP_DIR/addresses
diff --git a/tools/jenkins/configurations/xs.sh b/tools/jenkins/configurations/xs.sh
new file mode 100755
index 0000000..864f949
--- /dev/null
+++ b/tools/jenkins/configurations/xs.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+set -o errexit
+set -o xtrace
+
+
+EXECUTOR_NUMBER=$1
+CONFIGURATION=$2
+ADAPTER=$3
+RC=$4
+
+function usage() {
+    echo "Usage: $0 - Build a test configuration"
+    echo ""
+    echo "$0 [EXECUTOR_NUMBER] [CONFIGURATION] [ADAPTER] [RC (optional)]"
+    exit 1
+}
+
+# Validate inputs
+if [[ "$EXECUTOR_NUMBER" = "" || "$CONFIGURATION" = ""  || "$ADAPTER" = "" ]]; then
+    usage
+fi
+
+# Configuration of xenrc
+XENRC=/var/lib/jenkins/xenrc
+if [ ! -e $XENRC ]; then
+    echo "/var/lib/jenkins/xenrc is not present! See README.md"
+    exit 1
+fi
+
+# Move to top of devstack
+cd ../../..
+
+# Use xenrc as the start of our localrc
+cp $XENRC localrc
+
+# Set the PUB_IP
+PUB_IP=192.168.1.1$EXECUTOR_NUMBER
+echo "PUB_IP=$PUB_IP" >> localrc
+
+# Overrides
+echo "$RC" >> localrc
+
+# Source localrc
+. localrc
+
+# Make host ip available to tester
+echo "HEAD=$PUB_IP" > addresses
+
+# Build configuration
+REMOTE_DEVSTACK=/root/devstack
+ssh root@$XEN_IP "rm -rf $REMOTE_DEVSTACK"
+scp -pr . root@$XEN_IP:$REMOTE_DEVSTACK
+ssh root@$XEN_IP "cd $REMOTE_DEVSTACK/tools/xen && ./build_domU.sh"
diff --git a/tools/jenkins/jenkins_home/.gitignore b/tools/jenkins/jenkins_home/.gitignore
new file mode 100644
index 0000000..d831d01
--- /dev/null
+++ b/tools/jenkins/jenkins_home/.gitignore
@@ -0,0 +1,3 @@
+builds
+workspace
+*.sw*
diff --git a/tools/jenkins/jenkins_home/build_jenkins.sh b/tools/jenkins/jenkins_home/build_jenkins.sh
new file mode 100755
index 0000000..d60679b
--- /dev/null
+++ b/tools/jenkins/jenkins_home/build_jenkins.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+# Make sure only root can run our script
+if [[ $EUID -ne 0 ]]; then
+   echo "This script must be run as root"
+   exit 1
+fi
+
+# Make sure user has configured an ssh pubkey
+if [ ! -e /root/.ssh/id_rsa.pub ]; then
+   echo "Public key is missing.  This is used to ssh into your instances."
+   echo "Please run ssh-keygen before proceeding"
+   exit 1
+fi
+
+# This directory
+CUR_DIR=$(cd $(dirname "$0") && pwd)
+
+# Configure trunk jenkins!
+echo "deb http://pkg.jenkins-ci.org/debian binary/" > /etc/apt/sources.list.d/jenkins.list
+wget -q -O - http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key | sudo apt-key add -
+apt-get update
+
+
+# Clean out old jenkins - useful if you are having issues upgrading
+CLEAN_JENKINS=${CLEAN_JENKINS:-no}
+if [ "$CLEAN_JENKINS" = "yes" ]; then
+    apt-get remove jenkins jenkins-common
+fi
+
+# Install software
+DEPS="jenkins cloud-utils"
+apt-get install -y --force-yes $DEPS
+
+# Install jenkins
+if [ ! -e /var/lib/jenkins ]; then
+   echo "Jenkins installation failed"
+   exit 1
+fi
+
+# Setup sudo
+JENKINS_SUDO=/etc/sudoers.d/jenkins
+cat > $JENKINS_SUDO <<EOF
+jenkins ALL = NOPASSWD: ALL
+EOF
+chmod 440 $JENKINS_SUDO
+
+# Setup .gitconfig
+JENKINS_GITCONF=/var/lib/jenkins/hudson.plugins.git.GitSCM.xml
+cat > $JENKINS_GITCONF <<EOF
+<?xml version='1.0' encoding='UTF-8'?>
+<hudson.plugins.git.GitSCM_-DescriptorImpl>
+  <generation>4</generation>
+  <globalConfigName>Jenkins</globalConfigName>
+  <globalConfigEmail>jenkins@rcb.me</globalConfigEmail>
+</hudson.plugins.git.GitSCM_-DescriptorImpl>
+EOF
+
+# Add build numbers
+JOBS=`ls jobs`
+for job in ${JOBS// / }; do
+    if [ ! -e jobs/$job/nextBuildNumber ]; then
+        echo 1 > jobs/$job/nextBuildNumber
+    fi
+done
+
+# Set ownership to jenkins
+chown -R jenkins $CUR_DIR
+
+# Make sure this directory is accessible to jenkins
+if ! su -c "ls $CUR_DIR" jenkins; then
+    echo "Your devstack directory is not accessible by jenkins."
+    echo "There is a decent chance you are trying to run this from a directory in /root."
+    echo "If so, try moving devstack elsewhere (eg. /opt/devstack)."
+    exit 1
+fi
+
+# Move aside old jobs, if present
+if [ ! -h /var/lib/jenkins/jobs ]; then
+    echo "Installing jobs symlink"
+    if [ -d /var/lib/jenkins/jobs ]; then
+        mv /var/lib/jenkins/jobs /var/lib/jenkins/jobs.old
+    fi
+fi
+
+# Set up jobs symlink
+rm -f /var/lib/jenkins/jobs
+ln -s $CUR_DIR/jobs /var/lib/jenkins/jobs
+
+# List of plugins
+PLUGINS=http://hudson-ci.org/downloads/plugins/build-timeout/1.6/build-timeout.hpi,http://mirrors.jenkins-ci.org/plugins/git/1.1.12/git.hpi,http://hudson-ci.org/downloads/plugins/global-build-stats/1.2/global-build-stats.hpi,http://hudson-ci.org/downloads/plugins/greenballs/1.10/greenballs.hpi,http://download.hudson-labs.org/plugins/console-column-plugin/1.0/console-column-plugin.hpi
+
+# Configure plugins
+for plugin in ${PLUGINS//,/ }; do
+    name=`basename $plugin`   
+    dest=/var/lib/jenkins/plugins/$name
+    if [ ! -e $dest ]; then
+        curl -L $plugin -o $dest
+    fi
+done
+
+# Restart jenkins
+/etc/init.d/jenkins stop || true
+/etc/init.d/jenkins start
diff --git a/tools/jenkins/jenkins_home/clean.sh b/tools/jenkins/jenkins_home/clean.sh
new file mode 100755
index 0000000..eb03022
--- /dev/null
+++ b/tools/jenkins/jenkins_home/clean.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+# This script is not yet for general consumption.
+
+set -o errexit
+
+if [ ! "$FORCE" = "yes" ]; then
+    echo "FORCE not set to 'yes'.  Make sure this is something you really want to do.  Exiting."
+    exit 1
+fi
+
+virsh list | cut -d " " -f1 | grep -v "-" | egrep -e "[0-9]" | xargs -n 1 virsh destroy || true
+virsh net-list | grep active | cut -d " " -f1 | xargs -n 1 virsh net-destroy || true
+killall dnsmasq || true
+if [ "$CLEAN" = "yes" ]; then
+    rm -rf jobs
+fi
+rm -f /var/lib/jenkins/jobs
+git checkout -f
+git fetch
+git merge origin/jenkins
+./build_jenkins.sh
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml
new file mode 100644
index 0000000..bb5e1d0
--- /dev/null
+++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/config.xml
@@ -0,0 +1,82 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<matrix-project>
+  <actions/>
+  <description></description>
+  <keepDependencies>false</keepDependencies>
+  <properties>
+    <hudson.model.ParametersDefinitionProperty>
+      <parameterDefinitions>
+        <hudson.model.StringParameterDefinition>
+          <name>RC</name>
+          <description></description>
+          <defaultValue></defaultValue>
+        </hudson.model.StringParameterDefinition>
+      </parameterDefinitions>
+    </hudson.model.ParametersDefinitionProperty>
+  </properties>
+  <scm class="hudson.plugins.git.GitSCM">
+    <configVersion>2</configVersion>
+    <userRemoteConfigs>
+      <hudson.plugins.git.UserRemoteConfig>
+        <name>origin</name>
+        <refspec>+refs/heads/*:refs/remotes/origin/*</refspec>
+        <url>git://github.com/cloudbuilders/devstack.git</url>
+      </hudson.plugins.git.UserRemoteConfig>
+    </userRemoteConfigs>
+    <branches>
+      <hudson.plugins.git.BranchSpec>
+        <name>jenkins</name>
+      </hudson.plugins.git.BranchSpec>
+    </branches>
+    <recursiveSubmodules>false</recursiveSubmodules>
+    <doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
+    <authorOrCommitter>false</authorOrCommitter>
+    <clean>false</clean>
+    <wipeOutWorkspace>false</wipeOutWorkspace>
+    <pruneBranches>false</pruneBranches>
+    <remotePoll>false</remotePoll>
+    <buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
+    <gitTool>Default</gitTool>
+    <submoduleCfg class="list"/>
+    <relativeTargetDir></relativeTargetDir>
+    <excludedRegions></excludedRegions>
+    <excludedUsers></excludedUsers>
+    <gitConfigName></gitConfigName>
+    <gitConfigEmail></gitConfigEmail>
+    <skipTag>false</skipTag>
+    <scmName></scmName>
+  </scm>
+  <canRoam>true</canRoam>
+  <disabled>false</disabled>
+  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
+  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
+  <triggers class="vector"/>
+  <concurrentBuild>false</concurrentBuild>
+  <axes>
+    <hudson.matrix.TextAxis>
+      <name>ADAPTER</name>
+      <values>
+        <string>euca</string>
+        <string>floating_ips</string>
+      </values>
+    </hudson.matrix.TextAxis>
+  </axes>
+  <builders>
+    <hudson.tasks.Shell>
+      <command>sed -i &apos;s/) 2&gt;&amp;1 | tee &quot;${LOGFILE}&quot;/)/&apos; stack.sh</command>
+    </hudson.tasks.Shell>
+    <hudson.tasks.Shell>
+      <command>set -o errexit
+cd tools/jenkins
+sudo ./build_configuration.sh $EXECUTOR_NUMBER kvm $ADAPTER &quot;$RC&quot;</command>
+    </hudson.tasks.Shell>
+    <hudson.tasks.Shell>
+      <command>set -o errexit
+cd tools/jenkins
+./run_test.sh $EXECUTOR_NUMBER $ADAPTER &quot;$RC&quot;</command>
+    </hudson.tasks.Shell>
+  </builders>
+  <publishers/>
+  <buildWrappers/>
+  <runSequentially>false</runSequentially>
+</matrix-project>
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml
new file mode 100644
index 0000000..0be70a5
--- /dev/null
+++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/euca/config.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<matrix-config>
+  <keepDependencies>false</keepDependencies>
+  <properties/>
+  <scm class="hudson.scm.NullSCM"/>
+  <canRoam>false</canRoam>
+  <disabled>false</disabled>
+  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
+  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
+  <triggers class="vector"/>
+  <concurrentBuild>false</concurrentBuild>
+  <builders/>
+  <publishers/>
+  <buildWrappers/>
+</matrix-config>
\ No newline at end of file
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml
new file mode 100644
index 0000000..0be70a5
--- /dev/null
+++ b/tools/jenkins/jenkins_home/jobs/diablo-kvm_ha/configurations/axis-ADAPTER/floatingips/config.xml
@@ -0,0 +1,15 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<matrix-config>
+  <keepDependencies>false</keepDependencies>
+  <properties/>
+  <scm class="hudson.scm.NullSCM"/>
+  <canRoam>false</canRoam>
+  <disabled>false</disabled>
+  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
+  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
+  <triggers class="vector"/>
+  <concurrentBuild>false</concurrentBuild>
+  <builders/>
+  <publishers/>
+  <buildWrappers/>
+</matrix-config>
\ No newline at end of file
diff --git a/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
new file mode 100644
index 0000000..21cd496
--- /dev/null
+++ b/tools/jenkins/jenkins_home/jobs/diablo-xs_ha/config.xml
@@ -0,0 +1,88 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<matrix-project>
+  <actions/>
+  <description>In order for this to work, you must create a /var/lib/jenkins/xenrc file as described in README.md</description>
+  <keepDependencies>false</keepDependencies>
+  <properties>
+    <hudson.model.ParametersDefinitionProperty>
+      <parameterDefinitions>
+        <hudson.model.StringParameterDefinition>
+          <name>RC</name>
+          <description></description>
+          <defaultValue></defaultValue>
+        </hudson.model.StringParameterDefinition>
+      </parameterDefinitions>
+    </hudson.model.ParametersDefinitionProperty>
+  </properties>
+  <scm class="hudson.plugins.git.GitSCM">
+    <configVersion>2</configVersion>
+    <userRemoteConfigs>
+      <hudson.plugins.git.UserRemoteConfig>
+        <name>origin</name>
+        <refspec>+refs/heads/*:refs/remotes/origin/*</refspec>
+        <url>git://github.com/cloudbuilders/devstack.git</url>
+      </hudson.plugins.git.UserRemoteConfig>
+    </userRemoteConfigs>
+    <branches>
+      <hudson.plugins.git.BranchSpec>
+        <name>jenkins</name>
+      </hudson.plugins.git.BranchSpec>
+    </branches>
+    <recursiveSubmodules>false</recursiveSubmodules>
+    <doGenerateSubmoduleConfigurations>false</doGenerateSubmoduleConfigurations>
+    <authorOrCommitter>false</authorOrCommitter>
+    <clean>false</clean>
+    <wipeOutWorkspace>false</wipeOutWorkspace>
+    <pruneBranches>false</pruneBranches>
+    <remotePoll>false</remotePoll>
+    <buildChooser class="hudson.plugins.git.util.DefaultBuildChooser"/>
+    <gitTool>Default</gitTool>
+    <submoduleCfg class="list"/>
+    <relativeTargetDir></relativeTargetDir>
+    <excludedRegions></excludedRegions>
+    <excludedUsers></excludedUsers>
+    <gitConfigName></gitConfigName>
+    <gitConfigEmail></gitConfigEmail>
+    <skipTag>false</skipTag>
+    <scmName></scmName>
+  </scm>
+  <canRoam>true</canRoam>
+  <disabled>false</disabled>
+  <blockBuildWhenDownstreamBuilding>false</blockBuildWhenDownstreamBuilding>
+  <blockBuildWhenUpstreamBuilding>false</blockBuildWhenUpstreamBuilding>
+  <triggers class="vector"/>
+  <concurrentBuild>false</concurrentBuild>
+  <axes>
+    <hudson.matrix.TextAxis>
+      <name>ADAPTER</name>
+      <values>
+        <string>euca</string>
+        <string>floating_ips</string>
+      </values>
+    </hudson.matrix.TextAxis>
+  </axes>
+  <builders>
+    <hudson.tasks.Shell>
+      <command>sed -i &apos;s/) 2&gt;&amp;1 | tee &quot;${LOGFILE}&quot;/)/&apos; stack.sh</command>
+    </hudson.tasks.Shell>
+    <hudson.tasks.Shell>
+      <command>set -o errexit
+cd tools/jenkins
+sudo ./build_configuration.sh $EXECUTOR_NUMBER xs $ADAPTER &quot;$RC&quot;</command>
+    </hudson.tasks.Shell>
+    <hudson.tasks.Shell>
+      <command>#!/bin/bash
+set -o errexit
+set -o xtrace
+
+. localrc
+
+# Unlike kvm, ssh to the xen host to run tests, in case the test instance is launch with a host only network
+ssh root@$XEN_IP &quot;cd devstack &amp;&amp; . localrc &amp;&amp; cd tools/jenkins &amp;&amp; ./run_test.sh $EXECUTOR_NUMBER $ADAPTER &apos;$RC&apos;&quot; 
+</command>
+    </hudson.tasks.Shell>
+  </builders>
+  <publishers/>
+  <buildWrappers/>
+  <runSequentially>true</runSequentially>
+</matrix-project>
\ No newline at end of file
diff --git a/tools/jenkins/jenkins_home/print_summary.py b/tools/jenkins/jenkins_home/print_summary.py
new file mode 100755
index 0000000..1d71a4a
--- /dev/null
+++ b/tools/jenkins/jenkins_home/print_summary.py
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+import urllib
+import json
+import sys
+
+
+def print_usage():
+    print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\
+          % sys.argv[0]
+    sys.exit()
+
+
+def fetch_blob(url):
+    return json.loads(urllib.urlopen(url + '/api/json').read())
+
+
+if len(sys.argv) < 2:
+    print_usage()
+
+BASE_URL = sys.argv[1]
+
+root = fetch_blob(BASE_URL)
+results = {}
+for job_url in root['jobs']:
+    job = fetch_blob(job_url['url'])
+    if job.get('activeConfigurations'):
+        (tag, name) = job['name'].split('-')
+        if not results.get(tag):
+            results[tag] = {}
+        if not results[tag].get(name):
+            results[tag][name] = []
+
+        for config_url in job['activeConfigurations']:
+            config = fetch_blob(config_url['url'])
+
+            log_url = ''
+            if config.get('lastBuild'):
+                log_url = config['lastBuild']['url'] + 'console'
+
+            results[tag][name].append({'test': config['displayName'],
+                                       'status': config['color'],
+                                       'logUrl': log_url,
+                                       'healthReport': config['healthReport']})
+
+print json.dumps(results)
diff --git a/tools/jenkins/run_test.sh b/tools/jenkins/run_test.sh
new file mode 100755
index 0000000..4649563
--- /dev/null
+++ b/tools/jenkins/run_test.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+EXECUTOR_NUMBER=$1
+ADAPTER=$2
+RC=$3
+
+function usage() {
+    echo "Usage: $0 - Run a test"
+    echo ""
+    echo "$0 [EXECUTOR_NUMBER] [ADAPTER] [RC (optional)]"
+    exit 1
+}
+
+# Validate inputs
+if [[ "$EXECUTOR_NUMBER" = "" || "$ADAPTER" = "" ]]; then
+    usage
+fi
+
+# Execute configuration script
+cd adapters && ./$ADAPTER.sh $EXECUTOR_NUMBER $ADAPTER "$RC"
diff --git a/tools/setup_stack_user.sh b/tools/setup_stack_user.sh
new file mode 100755
index 0000000..fcb9733
--- /dev/null
+++ b/tools/setup_stack_user.sh
@@ -0,0 +1,70 @@
+#!/usr/bin/env bash
+
+# Echo commands
+set -o xtrace
+
+# Exit on error to stop unexpected errors
+set -o errexit
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+
+# Change dir to top of devstack
+cd $TOP_DIR
+
+# Echo usage
+usage() {
+    echo "Add stack user and keys"
+    echo ""
+    echo "Usage: $0 [full path to raw uec base image]"
+}
+
+# Make sure this is a raw image
+if ! qemu-img info $1 | grep -q "file format: raw"; then
+    usage
+    exit 1
+fi
+
+# Mount the image
+DEST=/opt/stack
+STAGING_DIR=/tmp/`echo $1 | sed  "s/\//_/g"`.stage.user
+mkdir -p $STAGING_DIR
+umount $STAGING_DIR || true
+sleep 1
+mount -t ext4 -o loop $1 $STAGING_DIR
+mkdir -p $STAGING_DIR/$DEST
+
+# Create a stack user that is a member of the libvirtd group so that stack
+# is able to interact with libvirt.
+chroot $STAGING_DIR groupadd libvirtd || true
+chroot $STAGING_DIR useradd stack -s /bin/bash -d $DEST -G libvirtd || true
+
+# Add a simple password - pass
+echo stack:pass | chroot $STAGING_DIR chpasswd
+
+# Configure sudo
+grep -q "^#includedir.*/etc/sudoers.d" $STAGING_DIR/etc/sudoers ||
+    echo "#includedir /etc/sudoers.d" | sudo tee -a $STAGING_DIR/etc/sudoers
+cp $TOP_DIR/files/sudo/* $STAGING_DIR/etc/sudoers.d/
+sed -e "s,%USER%,$USER,g" -i $STAGING_DIR/etc/sudoers.d/*
+
+# Gracefully cp only if source file/dir exists
+function cp_it {
+    if [ -e $1 ] || [ -d $1 ]; then
+        cp -pRL $1 $2
+    fi
+}
+
+# Copy over your ssh keys and env if desired
+cp_it ~/.ssh $STAGING_DIR/$DEST/.ssh
+cp_it ~/.ssh/id_rsa.pub $STAGING_DIR/$DEST/.ssh/authorized_keys
+cp_it ~/.gitconfig $STAGING_DIR/$DEST/.gitconfig
+cp_it ~/.vimrc $STAGING_DIR/$DEST/.vimrc
+cp_it ~/.bashrc $STAGING_DIR/$DEST/.bashrc
+
+# Give stack ownership over $DEST so it may do the work needed
+chroot $STAGING_DIR chown -R stack $DEST
+
+# Unmount
+umount $STAGING_DIR
diff --git a/tools/warm_apts_and_pips.sh b/tools/warm_apts_and_pips.sh
new file mode 100755
index 0000000..ec7e916
--- /dev/null
+++ b/tools/warm_apts_and_pips.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+
+# Echo commands
+set -o xtrace
+
+# Exit on error to stop unexpected errors
+set -o errexit
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=`cd $TOOLS_DIR/..; pwd`
+
+# Change dir to top of devstack
+cd $TOP_DIR
+
+# Echo usage
+usage() {
+    echo "Cache OpenStack dependencies on a uec image to speed up performance."
+    echo ""
+    echo "Usage: $0 [full path to raw uec base image]"
+}
+
+# Make sure this is a raw image
+if ! qemu-img info $1 | grep -q "file format: raw"; then
+    usage
+    exit 1
+fi
+
+# Make sure we are in the correct dir
+if [ ! -d files/apts ]; then
+    echo "Please run this script from devstack/tools/"
+    exit 1
+fi 
+
+# Mount the image
+STAGING_DIR=/tmp/`echo $1 | sed  "s/\//_/g"`.stage
+mkdir -p $STAGING_DIR
+umount $STAGING_DIR || true
+sleep 1
+mount -t ext4 -o loop $1 $STAGING_DIR
+
+# Make sure that base requirements are installed
+cp /etc/resolv.conf $STAGING_DIR/etc/resolv.conf
+
+# Perform caching on the base image to speed up subsequent runs
+chroot $STAGING_DIR apt-get update
+chroot $STAGING_DIR apt-get install -y --download-only `cat files/apts/* | grep NOPRIME | cut -d\# -f1`
+chroot $STAGING_DIR apt-get install -y --force-yes `cat files/apts/* | grep -v NOPRIME | cut -d\# -f1` || true
+mkdir -p $STAGING_DIR/var/cache/pip
+PIP_DOWNLOAD_CACHE=/var/cache/pip chroot $STAGING_DIR pip install `cat files/pips/*` || true
+
+# Unmount
+umount $STAGING_DIR
diff --git a/tools/xen/build_domU.sh b/tools/xen/build_domU.sh
index 6362849..d79d5c3 100755
--- a/tools/xen/build_domU.sh
+++ b/tools/xen/build_domU.sh
@@ -240,6 +240,11 @@
         xe vm-shutdown uuid=$uuid
         xe vm-destroy uuid=$uuid
     done
+
+    # Destroy orphaned vdis
+    for uuid in `xe vdi-list | grep -1 Glance | grep uuid | sed "s/.*\: //g"`; do
+        xe vdi-destroy uuid=$uuid
+    done
 fi
 
 # Path to head xva.  By default keep overwriting the same one to save space