Merge "Configure swift logging."
diff --git a/AUTHORS b/AUTHORS
index 9d8366b..84a565e 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -11,6 +11,7 @@
 Jay Pipes <jaypipes@gmail.com>
 Jesse Andrews <anotherjesse@gmail.com>
 Justin Shepherd <galstrom21@gmail.com>
+Kiall Mac Innes <kiall@managedit.ie>
 Scott Moser <smoser@ubuntu.com>
 Todd Willey <xtoddx@gmail.com>
 Tres Henry <tres@treshenry.net>
diff --git a/README.md b/README.md
index 8b2b038..a185f34 100644
--- a/README.md
+++ b/README.md
@@ -1,37 +1,42 @@
-Tool to quickly deploy openstack dev environments.
+Devstack is a set of scripts and utilities to quickly deploy an OpenStack cloud.
 
 # Goals
 
-* To quickly build dev openstack environments in clean oneiric environments
-* To describe working configurations of openstack (which code branches work together?  what do config files look like for those branches?)
-* To make it easier for developers to dive into openstack so that they can productively contribute without having to understand every part of the system at once
+* To quickly build dev OpenStack environments in a clean oneiric environment
+* To describe working configurations of OpenStack (which code branches work together?  what do config files look like for those branches?)
+* To make it easier for developers to dive into OpenStack so that they can productively contribute without having to understand every part of the system at once
 * To make it easy to prototype cross-project features
 
 Read more at http://devstack.org (built from the gh-pages branch)
 
-Be sure to carefully read these scripts before you run them as they install software and may alter your networking configuration.
+IMPORTANT: Be sure to carefully read stack.sh and any other scripts you execute before you run them, as they install software and may alter your networking configuration.  We strongly recommend that you run stack.sh in a clean and disposable vm when you are first getting started.
 
-# To start a dev cloud on your local machine (installing on a dedicated vm is safer!):
+# Versions
+
+The devstack master branch generally points to trunk versions of OpenStack components.  For older, stable versions, look for branches named stable/[milestone].  For example, you can do the following to create a diablo OpenStack cloud:
+
+    git checkout stable/diablo
+    ./stack.sh
+
+# To start a dev cloud (installing in a dedicated, disposable vm is safer than installing on your dev machine!):
 
     ./stack.sh
 
-If working correctly, you should be able to access openstack endpoints, like:
+When the script finishes executing, you should be able to access OpenStack endpoints, like so:
 
 * Horizon: http://myhost/
 * Keystone: http://myhost:5000/v2.0/
 
+We also provide an environment file that you can use to interact with your cloud via CLI:
+
+    # source openrc file to load your environment with osapi and ec2 creds
+    . openrc
+    # list instances
+    nova list
+    # list instances using ec2 api
+    euca-describe-instances
+
 # Customizing
 
-You can tweak environment variables by creating file name 'localrc' should you need to override defaults.  It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
-
-# Todo
-
-* Add python-novaclient cli support
-* syslog
-* Add volume support
-* Add quantum support
-
-# Future
-
-* idea: move from screen to tmux?
-* idea: create a live-cd / vmware preview image using this?
+You can override environment variables used in stack.sh by creating a file named 'localrc'.  It is likely that you will need to do this to tweak your networking configuration, for example if you need to access your cloud from a different host.
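+
+For example, a minimal localrc might look like this (the values below are purely illustrative):
+
+    HOST_IP=192.168.1.100
+    FLOATING_RANGE=172.24.4.224/28
+    ADMIN_PASSWORD=supersecret
+    RABBIT_PASSWORD=supersecret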
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 9605ace..2f7a17b 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# we will use the ``euca2ools`` cli tool that wraps the python boto 
+# we will use the ``euca2ools`` cli tool that wraps the python boto
 # library to test ec2 compatibility
 #
 
@@ -12,7 +12,6 @@
 # an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
-
 # Settings
 # ========
 
@@ -21,16 +20,59 @@
 source ./openrc
 popd
 
-# find a machine image to boot
-IMAGE=`euca-describe-images | grep machine | cut -f2`
+# Find a machine image to boot
+IMAGE=`euca-describe-images | grep machine | cut -f2 | head -n1`
 
-# launch it
-INSTANCE=`euca-run-instances $IMAGE | grep INSTANCE | cut -f2`
+# Define secgroup
+SECGROUP=euca_secgroup
 
-# assure it has booted within a reasonable time
-if ! timeout $RUNNING_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
+# Add a secgroup
+euca-add-group -d description $SECGROUP
+
+# Launch it
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
+
+# Assure it has booted within a reasonable time
+if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
     echo "server didn't become active within $RUNNING_TIMEOUT seconds"
     exit 1
 fi
 
+# Allocate floating address
+FLOATING_IP=`euca-allocate-address | cut -f2`
+
+# Associate floating address with the instance
+euca-associate-address -i $INSTANCE $FLOATING_IP
+
+# Authorize pinging
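+# (-t -1:-1 matches all ICMP types and codes)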
+euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
+
+# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! ping -c1 -w1 $FLOATING_IP; do sleep 1; done"; then
+    echo "Couldn't ping server with floating ip"
+    exit 1
+fi
+
+# Revoke pinging
+euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP
+
+# Delete group
+euca-delete-group $SECGROUP
+
+# Disassociate floating address
+euca-disassociate-address $FLOATING_IP
+
+# Release floating address
+euca-release-address $FLOATING_IP
+
+# Wait just a tick for everything above to complete so terminate doesn't fail
+if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
+    echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
+    exit 1
+fi
+
+# Terminate instance
 euca-terminate-instances $INSTANCE
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index dca6d5b..135c8c1 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -67,12 +67,16 @@
 # List of flavors:
 nova flavor-list
 
-# and grab the first flavor in the list to launch
-FLAVOR=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
+if [[ -z "$INSTANCE_TYPE" ]]; then
+    # grab the first flavor in the list to launch if default doesn't exist
+    INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+fi
 
 NAME="myserver"
 
-nova boot --flavor $FLAVOR --image $IMAGE $NAME --security_groups=$SECGROUP
+nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP
 
 # Testing
 # =======
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
new file mode 100755
index 0000000..6ea9a51
--- /dev/null
+++ b/exercises/volumes.sh
@@ -0,0 +1,160 @@
+#!/usr/bin/env bash
+
+# Test nova volumes with the nova command from python-novaclient
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Use openrc + stackrc + localrc for settings
+pushd $(cd $(dirname "$0")/.. && pwd)
+source ./openrc
+popd
+
+# Get a token for clients that don't support service catalog
+# ==========================================================
+
+# manually create a token by querying keystone (sending JSON data).  Keystone
+# returns a token and catalog of endpoints.  We use python to parse the token
+# and save it.
+
+TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$NOVA_USERNAME\", \"password\": \"$NOVA_PASSWORD\"}}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
+
+# Launching a server
+# ==================
+
+# List servers for tenant:
+nova list
+
+# Images
+# ------
+
+# Nova has a **deprecated** way of listing images.
+nova image-list
+
+# But we recommend using glance directly
+glance -A $TOKEN index
+
+# Let's grab the id of the first AMI image to launch
+IMAGE=`glance -A $TOKEN index | egrep ami | head -1 | cut -d" " -f1`
+
+# Determine instance type
+# -----------------------
+
+# List of instance types:
+nova flavor-list
+
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
+if [[ -z "$INSTANCE_TYPE" ]]; then
+    # grab the first flavor in the list to launch if default doesn't exist
+    INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+fi
+
+NAME="myserver"
+
+VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
+
+# Testing
+# =======
+
+# First check if it spins up (becomes active and responds to ping on
+# internal ip).  If you run this script from a nova node, you should
+# bypass security groups and have direct access to the server.
+
+# Waiting for boot
+# ----------------
+
+# Max time to wait while vm goes from build to active state
+ACTIVE_TIMEOUT=${ACTIVE_TIMEOUT:-30}
+
+# Max time till the vm is bootable
+BOOT_TIMEOUT=${BOOT_TIMEOUT:-15}
+
+# Max time to wait for proper association and dis-association.
+ASSOCIATE_TIMEOUT=${ASSOCIATE_TIMEOUT:-10}
+
+# check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+    echo "server didn't become active!"
+    exit 1
+fi
+
+# get the IP of the server
+IP=`nova show $VM_UUID | grep "private network" | cut -d"|" -f3`
+
+# for single node deployments, we can ping private ips
+MULTI_HOST=${MULTI_HOST:-0}
+if [ "$MULTI_HOST" = "0" ]; then
+    # sometimes the first ping fails (10 seconds isn't enough time for the VM's
+    # network to respond?), so let's ping for a default of 15 seconds with a
+    # timeout of a second for each ping.
+    if ! timeout $BOOT_TIMEOUT sh -c "while ! ping -c1 -w1 $IP; do sleep 1; done"; then
+        echo "Couldn't ping server"
+        exit 1
+    fi
+else
+    # On a multi-host system, without vm net access, do a sleep to wait for the boot
+    sleep $BOOT_TIMEOUT
+fi
+
+# Volumes
+# -------
+
+VOL_NAME="myvol-$(openssl rand -hex 4)"
+
+# Verify it doesn't exist
+if [[ -n "`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f3 | sed 's/ //g'`" ]]; then
+    echo "Volume $VOL_NAME already exists"
+    exit 1
+fi
+
+# Create a new volume
+nova volume-create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" 1
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not created"
+    exit 1
+fi
+
+# Get volume ID
+VOL_ID=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f2 | sed 's/ //g'`
+
+# Attach to server
+DEVICE=/dev/vdb
+nova volume-attach $VM_UUID $VOL_ID $DEVICE
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not attached to $NAME"
+    exit 1
+fi
+
+VOL_ATTACH=`nova volume-list | grep $VOL_NAME | head -1 | cut -d'|' -f6 | sed 's/ //g'`
+if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
+    echo "Volume not attached to correct instance"
+    exit 1
+fi
+
+# Detach volume
+nova volume-detach $VM_UUID $VOL_ID
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova volume-list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not detached from $NAME"
+    exit 1
+fi
+
+# Delete volume
+nova volume-delete $VOL_ID
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova volume-list | grep $VOL_NAME; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not deleted"
+    exit 1
+fi
+
+# shutdown the server
+nova delete $NAME
diff --git a/files/000-default.template b/files/000-default.template
index 43013df..1d7380d 100644
--- a/files/000-default.template
+++ b/files/000-default.template
@@ -6,7 +6,7 @@
     WSGIProcessGroup horizon
 
     DocumentRoot %HORIZON_DIR%/.blackhole/
-    Alias /media %HORIZON_DIR%/openstack-dashboard/media
+    Alias /media %HORIZON_DIR%/openstack-dashboard/dashboard/static
     Alias /vpn /opt/stack/vpn
 
     <Directory />
diff --git a/files/apts/horizon b/files/apts/horizon
index 6f145e1..1e0b0e6 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -13,3 +13,14 @@
 pep8
 python-eventlet
 python-nose
+python-sphinx
+python-mox
+python-kombu
+python-coverage
+python-cherrypy3 # why?
+python-django
+python-django-mailer
+python-django-nose
+python-django-registration
+python-cloudfiles
+python-migrate
diff --git a/files/apts/n-cpu b/files/apts/n-cpu
new file mode 100644
index 0000000..06c21a2
--- /dev/null
+++ b/files/apts/n-cpu
@@ -0,0 +1,4 @@
+# Stuff for diablo volumes
+lvm2
+open-iscsi
+open-iscsi-utils
diff --git a/files/apts/novnc b/files/apts/n-vnc
similarity index 100%
rename from files/apts/novnc
rename to files/apts/n-vnc
diff --git a/files/apts/n-vol b/files/apts/n-vol
new file mode 100644
index 0000000..edaee2c
--- /dev/null
+++ b/files/apts/n-vol
@@ -0,0 +1,3 @@
+iscsitarget  # NOPRIME
+iscsitarget-dkms  # NOPRIME
+lvm2
diff --git a/files/apts/nova b/files/apts/nova
index f4fe459..bc0c23b 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -2,7 +2,8 @@
 dnsmasq-utils # for dhcp_release only available in dist:oneiric
 kpartx
 parted
-arping # used for send_arp_for_ha option in nova-network
+arping # only available in dist:natty
+iputils-arping # only available in dist:oneiric
 mysql-server # NOPRIME
 python-mysqldb
 python-xattr # needed for glance which is needed for nova --- this shouldn't be here
@@ -39,8 +40,4 @@
 python-m2crypto
 python-boto
 python-kombu
-
-# Stuff for diablo volumes
-iscsitarget  # NOPRIME
-iscsitarget-dkms  # NOPRIME
-lvm2
+python-feedparser
diff --git a/files/glance-api.conf b/files/glance-api.conf
index bb758af..6c670b5 100644
--- a/files/glance-api.conf
+++ b/files/glance-api.conf
@@ -141,30 +141,32 @@
 [pipeline:glance-api]
 #pipeline = versionnegotiation context apiv1app
 # NOTE: use the following pipeline for keystone
-pipeline = versionnegotiation authtoken context apiv1app
+pipeline = versionnegotiation authtoken auth-context apiv1app
 
 # To enable Image Cache Management API replace pipeline with below:
 # pipeline = versionnegotiation context imagecache apiv1app
 # NOTE: use the following pipeline for keystone auth (with caching)
-# pipeline = versionnegotiation authtoken context imagecache apiv1app
-
-[pipeline:versions]
-pipeline = versionsapp
-
-[app:versionsapp]
-paste.app_factory = glance.api.versions:app_factory
+# pipeline = versionnegotiation authtoken auth-context imagecache apiv1app
 
 [app:apiv1app]
-paste.app_factory = glance.api.v1:app_factory
+paste.app_factory = glance.common.wsgi:app_factory
+glance.app_factory = glance.api.v1.router:API
 
 [filter:versionnegotiation]
-paste.filter_factory = glance.api.middleware.version_negotiation:filter_factory
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter
 
-[filter:imagecache]
-paste.filter_factory = glance.api.middleware.image_cache:filter_factory
+[filter:cache]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.cache:CacheFilter
+
+[filter:cachemanage]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter
 
 [filter:context]
-paste.filter_factory = glance.common.context:filter_factory
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.common.context:ContextMiddleware
 
 [filter:authtoken]
 paste.filter_factory = keystone.middleware.auth_token:filter_factory
@@ -176,3 +178,7 @@
 auth_protocol = http
 auth_uri = http://127.0.0.1:5000/
 admin_token = %SERVICE_TOKEN%
+
+[filter:auth-context]
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/glance-registry.conf b/files/glance-registry.conf
index 1e04186..e732e86 100644
--- a/files/glance-registry.conf
+++ b/files/glance-registry.conf
@@ -46,14 +46,16 @@
 [pipeline:glance-registry]
 #pipeline = context registryapp
 # NOTE: use the following pipeline for keystone
-pipeline = authtoken keystone_shim context registryapp
+pipeline = authtoken auth-context context registryapp
 
 [app:registryapp]
-paste.app_factory = glance.registry.server:app_factory
+paste.app_factory = glance.common.wsgi:app_factory
+glance.app_factory = glance.registry.api.v1:API
 
 [filter:context]
 context_class = glance.registry.context.RequestContext
-paste.filter_factory = glance.common.context:filter_factory
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = glance.common.context:ContextMiddleware
 
 [filter:authtoken]
 paste.filter_factory = keystone.middleware.auth_token:filter_factory
@@ -66,5 +68,7 @@
 auth_uri = http://127.0.0.1:5000/
 admin_token = %SERVICE_TOKEN%
 
-[filter:keystone_shim]
-paste.filter_factory = keystone.middleware.glance_auth_token:filter_factory
+[filter:auth-context]
+context_class = glance.registry.context.RequestContext
+paste.filter_factory = glance.common.wsgi:filter_factory
+glance.filter_factory = keystone.middleware.glance_auth_token:KeystoneContextMiddleware
diff --git a/files/horizon_settings.py b/files/horizon_settings.py
index 3a17db2..05ddfe7 100644
--- a/files/horizon_settings.py
+++ b/files/horizon_settings.py
@@ -12,23 +12,13 @@
     'default': {
         'ENGINE': 'django.db.backends.sqlite3',
         'NAME': os.path.join(LOCAL_PATH, 'dashboard_openstack.sqlite3'),
+        'TEST_NAME': os.path.join(LOCAL_PATH, 'test.sqlite3'),
     },
 }
 
+# The default values for these two settings seem to cause issues with apache
 CACHE_BACKEND = 'dummy://'
-
-# Add apps to horizon installation.
-INSTALLED_APPS = (
-    'dashboard',
-    'django.contrib.contenttypes',
-    'django.contrib.sessions',
-    'django.contrib.messages',
-    'django.contrib.staticfiles',
-    'django_openstack',
-    'django_openstack.templatetags',
-    'mailer',
-)
-
+SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
 
 # Send email to the console by default
 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
@@ -44,32 +34,40 @@
 # EMAIL_HOST_USER = 'djangomail'
 # EMAIL_HOST_PASSWORD = 'top-secret!'
 
-# FIXME: This needs to be changed to allow for multi-node setup.
-OPENSTACK_KEYSTONE_URL = "http://localhost:5000/v2.0/"
-OPENSTACK_KEYSTONE_ADMIN_URL = "http://localhost:35357/v2.0"
+HORIZON_CONFIG = {
+    'dashboards': ('nova', 'syspanel', 'settings',),
+    'default_dashboard': 'nova',
+    'user_home': 'dashboard.views.user_home',
+}
+
+OPENSTACK_HOST = "127.0.0.1"
+OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
+# FIXME: this is only needed until keystone fixes its GET /tenants call
+# so that it doesn't return everything for admins
+OPENSTACK_KEYSTONE_ADMIN_URL = "http://%s:35357/v2.0" % OPENSTACK_HOST
 OPENSTACK_KEYSTONE_DEFAULT_ROLE = "Member"
 
-# NOTE(tres): Available services should come from the service
-#             catalog in Keystone.
-SWIFT_ENABLED = False
+SWIFT_PAGINATE_LIMIT = 100
 
 # Configure quantum connection details for networking
 QUANTUM_ENABLED = False
-QUANTUM_URL = '127.0.0.1'
+QUANTUM_URL = '%s' % OPENSTACK_HOST
 QUANTUM_PORT = '9696'
 QUANTUM_TENANT = '1234'
 QUANTUM_CLIENT_VERSION='0.1'
 
-# No monitoring links currently
-EXTERNAL_MONITORING = []
+# If you have external monitoring links, eg:
+# EXTERNAL_MONITORING = [
+#     ['Nagios','http://foo.com'],
+#     ['Ganglia','http://bar.com'],
+# ]
 
-# Uncomment the following segment to silence most logging
-# django.db and boto DEBUG logging is extremely verbose.
 #LOGGING = {
 #        'version': 1,
-#        # set to True will disable all logging except that specified, unless
-#        # nothing is specified except that django.db.backends will still log,
-#        # even when set to True, so disable explicitly
+#        # When set to True this will disable all logging except
+#        # for loggers specified in this configuration dictionary. Note that
+#        # if nothing is specified here and disable_existing_loggers is True,
+#        # django.db.backends will still log unless it is disabled explicitly.
 #        'disable_existing_loggers': False,
 #        'handlers': {
 #            'null': {
@@ -77,20 +75,34 @@
 #                'class': 'django.utils.log.NullHandler',
 #                },
 #            'console': {
-#                'level': 'DEBUG',
+#                # Set the level to "DEBUG" for verbose output logging.
+#                'level': 'INFO',
 #                'class': 'logging.StreamHandler',
 #                },
 #            },
 #        'loggers': {
-#            # Comment or Uncomment these to turn on/off logging output
+#            # Logging from django.db.backends is VERY verbose, send to null
+#            # by default.
 #            'django.db.backends': {
 #                'handlers': ['null'],
 #                'propagate': False,
 #                },
-#            'django_openstack': {
-#                'handlers': ['null'],
+#            'horizon': {
+#                'handlers': ['console'],
 #                'propagate': False,
 #            },
+#            'novaclient': {
+#                'handlers': ['console'],
+#                'propagate': False,
+#            },
+#            'keystoneclient': {
+#                'handlers': ['console'],
+#                'propagate': False,
+#            },
+#            'nose.plugins.manager': {
+#                'handlers': ['console'],
+#                'propagate': False,
+#            }
 #        }
 #}
 
diff --git a/files/keystone.conf b/files/keystone.conf
index 687273b..a646513 100644
--- a/files/keystone.conf
+++ b/files/keystone.conf
@@ -25,6 +25,9 @@
 	'swift' : 'X-Storage-Url',
 	'cdn' : 'X-CDN-Management-Url'}
 
+# List of extensions currently supported
+extensions = osksadm,oskscatalog
+
 # Address to bind the API server
 # TODO Properties defined within app not available via pipeline.
 service_host = 0.0.0.0
@@ -32,23 +35,47 @@
 # Port to bind the API server to
 service_port = 5000
 
+# SSL for API server
+service_ssl = False
+
 # Address to bind the Admin API server
 admin_host = 0.0.0.0
 
 # Port to bind the Admin API server to
 admin_port = 35357
 
+# SSL for API Admin server
+admin_ssl = False
+
+# Keystone certificate file (modify as needed)
+# Only required if *_ssl is set to True
+certfile = /etc/keystone/ssl/certs/keystone.pem
+
+# Keystone private key file (modify as needed)
+# Only required if *_ssl is set to True
+keyfile = /etc/keystone/ssl/private/keystonekey.pem
+
+# Keystone trusted CA certificates (modify as needed)
+# Only required if *_ssl is set to True
+ca_certs = /etc/keystone/ssl/certs/ca.pem
+
+# Client certificate required
+# Only relevant if *_ssl is set to True
+cert_required = True
+
 #Role that allows performing admin operations.
-keystone-admin-role = KeystoneAdmin
+keystone-admin-role = Admin
 
 #Role that allows performing service admin operations.
 keystone-service-admin-role = KeystoneServiceAdmin
 
+#Tells whether user passwords need to be hashed in the backend
+hash-password = True
+
 [keystone.backends.sqlalchemy]
 # SQLAlchemy connection string for the reference implementation registry
 # server. Any valid SQLAlchemy connection string is fine.
 # See: http://bit.ly/ideIpI
-#sql_connection = sqlite:///keystone.db
 sql_connection = %SQL_CONN%
 backend_entities = ['UserRoleAssociation', 'Endpoints', 'Role', 'Tenant',
                     'User', 'Credentials', 'EndpointTemplates', 'Token',
@@ -60,14 +87,13 @@
 
 [pipeline:admin]
 pipeline =
-	urlrewritefilter
-	admin_api
+    urlrewritefilter
+    admin_api
 
 [pipeline:keystone-legacy-auth]
 pipeline =
-	urlrewritefilter
+    urlrewritefilter
     legacy_auth
-    RAX-KEY-extension
     service_api
 
 [app:service_api]
@@ -82,5 +108,5 @@
 [filter:legacy_auth]
 paste.filter_factory = keystone.frontends.legacy_token_auth:filter_factory
 
-[filter:RAX-KEY-extension]
-paste.filter_factory = keystone.contrib.extensions.service.raxkey.frontend:filter_factory
+[filter:debug]
+paste.filter_factory = keystone.common.wsgi:debug_filter_factory
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index d926c52..a25ba20 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -1,48 +1,54 @@
 #!/bin/bash
 BIN_DIR=${BIN_DIR:-.}
 # Tenants
-$BIN_DIR/keystone-manage $* tenant add admin
-$BIN_DIR/keystone-manage $* tenant add demo
-$BIN_DIR/keystone-manage $* tenant add invisible_to_admin
+$BIN_DIR/keystone-manage tenant add admin
+$BIN_DIR/keystone-manage tenant add demo
+$BIN_DIR/keystone-manage tenant add invisible_to_admin
 
 # Users
-$BIN_DIR/keystone-manage $* user add admin %ADMIN_PASSWORD%
-$BIN_DIR/keystone-manage $* user add demo %ADMIN_PASSWORD%
+$BIN_DIR/keystone-manage user add admin %ADMIN_PASSWORD%
+$BIN_DIR/keystone-manage user add demo %ADMIN_PASSWORD%
 
 # Roles
-$BIN_DIR/keystone-manage $* role add Admin
-$BIN_DIR/keystone-manage $* role add Member
-$BIN_DIR/keystone-manage $* role add KeystoneAdmin
-$BIN_DIR/keystone-manage $* role add KeystoneServiceAdmin
-$BIN_DIR/keystone-manage $* role add sysadmin
-$BIN_DIR/keystone-manage $* role add netadmin
-$BIN_DIR/keystone-manage $* role grant Admin admin admin
-$BIN_DIR/keystone-manage $* role grant Member demo demo
-$BIN_DIR/keystone-manage $* role grant sysadmin demo demo
-$BIN_DIR/keystone-manage $* role grant netadmin demo demo
-$BIN_DIR/keystone-manage $* role grant Member demo invisible_to_admin
-$BIN_DIR/keystone-manage $* role grant Admin admin demo
-$BIN_DIR/keystone-manage $* role grant Admin admin
-$BIN_DIR/keystone-manage $* role grant KeystoneAdmin admin
-$BIN_DIR/keystone-manage $* role grant KeystoneServiceAdmin admin
+$BIN_DIR/keystone-manage role add Admin
+$BIN_DIR/keystone-manage role add Member
+$BIN_DIR/keystone-manage role add KeystoneAdmin
+$BIN_DIR/keystone-manage role add KeystoneServiceAdmin
+$BIN_DIR/keystone-manage role add sysadmin
+$BIN_DIR/keystone-manage role add netadmin
+$BIN_DIR/keystone-manage role grant Admin admin admin
+$BIN_DIR/keystone-manage role grant Member demo demo
+$BIN_DIR/keystone-manage role grant sysadmin demo demo
+$BIN_DIR/keystone-manage role grant netadmin demo demo
+$BIN_DIR/keystone-manage role grant Member demo invisible_to_admin
+$BIN_DIR/keystone-manage role grant Admin admin demo
+$BIN_DIR/keystone-manage role grant Admin admin
+$BIN_DIR/keystone-manage role grant KeystoneAdmin admin
+$BIN_DIR/keystone-manage role grant KeystoneServiceAdmin admin
 
 # Services
-$BIN_DIR/keystone-manage $* service add nova compute "Nova Compute Service"
-$BIN_DIR/keystone-manage $* service add glance image "Glance Image Service"
-$BIN_DIR/keystone-manage $* service add keystone identity "Keystone Identity Service"
-$BIN_DIR/keystone-manage $* service add swift object-store "Swift Service"
+$BIN_DIR/keystone-manage service add nova compute "Nova Compute Service"
+$BIN_DIR/keystone-manage service add ec2 ec2 "EC2 Compatibility Layer"
+$BIN_DIR/keystone-manage service add glance image "Glance Image Service"
+$BIN_DIR/keystone-manage service add keystone identity "Keystone Identity Service"
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    $BIN_DIR/keystone-manage service add swift object-store "Swift Service"
+fi
 
 #endpointTemplates
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%HOST_IP%:8774/v1.1/%tenant_id% http://%HOST_IP%:8774/v1.1/%tenant_id%  http://%HOST_IP%:8774/v1.1/%tenant_id% 1 1
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% http://%HOST_IP%:9292/v1.1/%tenant_id% 1 1
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%HOST_IP%:5000/v2.0 http://%HOST_IP%:35357/v2.0 http://%HOST_IP%:5000/v2.0 1 1
-$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%HOST_IP%:8080/v1/AUTH_%tenant_id% http://%HOST_IP%:8080/ http://%HOST_IP%:8080/v1/AUTH_%tenant_id% 1 1
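+# endpointTemplates positional args appear to be: region service public_url admin_url internal_url enabled global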
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne nova http://%SERVICE_HOST%:8774/v1.1/%tenant_id% http://%SERVICE_HOST%:8774/v1.1/%tenant_id%  http://%SERVICE_HOST%:8774/v1.1/%tenant_id% 1 1
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne ec2 http://%SERVICE_HOST%:8773/services/Cloud http://%SERVICE_HOST%:8773/services/Admin http://%SERVICE_HOST%:8773/services/Cloud 1 1
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne glance http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 http://%SERVICE_HOST%:9292/v1 1 1
+$BIN_DIR/keystone-manage $* endpointTemplates add RegionOne keystone http://%SERVICE_HOST%:5000/v2.0 http://%SERVICE_HOST%:35357/v2.0 http://%SERVICE_HOST%:5000/v2.0 1 1
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    $BIN_DIR/keystone-manage $* endpointTemplates add RegionOne swift http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% http://%SERVICE_HOST%:8080/ http://%SERVICE_HOST%:8080/v1/AUTH_%tenant_id% 1 1
+fi
 
 # Tokens
-$BIN_DIR/keystone-manage $* token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
+$BIN_DIR/keystone-manage token add %SERVICE_TOKEN% admin admin 2015-02-05T00:00
 
 # EC2 related creds - note we are setting the secret key to ADMIN_PASSWORD
 # but keystone doesn't parse them - it is just a blob from keystone's
 # point of view
-$BIN_DIR/keystone-manage $* credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials"
-$BIN_DIR/keystone-manage $* credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials"
+$BIN_DIR/keystone-manage credentials add admin EC2 'admin' '%ADMIN_PASSWORD%' admin || echo "no support for adding credentials"
+$BIN_DIR/keystone-manage credentials add demo EC2 'demo' '%ADMIN_PASSWORD%' demo || echo "no support for adding credentials"
diff --git a/files/nova-api-paste.ini b/files/nova-api-paste.ini
index 2c642f8..7f27fdc 100644
--- a/files/nova-api-paste.ini
+++ b/files/nova-api-paste.ini
@@ -1,34 +1,54 @@
+############
+# Metadata #
+############
+[composite:metadata]
+use = egg:Paste#urlmap
+/: metaversions
+/latest: meta
+/2007-01-19: meta
+/2007-03-01: meta
+/2007-08-29: meta
+/2007-10-10: meta
+/2007-12-15: meta
+/2008-02-01: meta
+/2008-09-01: meta
+/2009-04-04: meta
+
+[pipeline:metaversions]
+pipeline = ec2faultwrap logrequest metaverapp
+
+[pipeline:meta]
+pipeline = ec2faultwrap logrequest metaapp
+
+[app:metaverapp]
+paste.app_factory = nova.api.metadata.handler:Versions.factory
+
+[app:metaapp]
+paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
+
 #######
 # EC2 #
 #######
 
 [composite:ec2]
 use = egg:Paste#urlmap
-/: ec2versions
 /services/Cloud: ec2cloud
 /services/Admin: ec2admin
-/latest: ec2metadata
-/2007-01-19: ec2metadata
-/2007-03-01: ec2metadata
-/2007-08-29: ec2metadata
-/2007-10-10: ec2metadata
-/2007-12-15: ec2metadata
-/2008-02-01: ec2metadata
-/2008-09-01: ec2metadata
-/2009-04-04: ec2metadata
-/1.0: ec2metadata
 
 [pipeline:ec2cloud]
-pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
+pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
 
 [pipeline:ec2admin]
-pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
+pipeline = ec2faultwrap logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
 
 [pipeline:ec2metadata]
-pipeline = logrequest ec2md
+pipeline = ec2faultwrap logrequest ec2md
 
 [pipeline:ec2versions]
-pipeline = logrequest ec2ver
+pipeline = ec2faultwrap logrequest ec2ver
+
+[filter:ec2faultwrap]
+paste.filter_factory = nova.api.ec2:FaultWrapper.factory
 
 [filter:logrequest]
 paste.filter_factory = nova.api.ec2:RequestLogging.factory
@@ -59,54 +79,45 @@
 [app:ec2executor]
 paste.app_factory = nova.api.ec2:Executor.factory
 
-[app:ec2ver]
-paste.app_factory = nova.api.ec2:Versions.factory
-
-[app:ec2md]
-paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory
-
 #############
 # Openstack #
 #############
 
 [composite:osapi]
-use = egg:Paste#urlmap
+use = call:nova.api.openstack.v2.urlmap:urlmap_factory
 /: osversions
-/v1.0: openstackapi10
-/v1.1: openstackapi11
+/v1.1: openstack_api_v2
+/v2: openstack_api_v2
 
-[pipeline:openstackapi10]
-pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
-
-[pipeline:openstackapi11]
-pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
+[pipeline:openstack_api_v2]
+pipeline = faultwrap authtoken keystonecontext ratelimit serialize extensions osapi_app_v2
 
 [filter:faultwrap]
-paste.filter_factory = nova.api.openstack:FaultWrapper.factory
+paste.filter_factory = nova.api.openstack.v2:FaultWrapper.factory
 
 [filter:auth]
-paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory
+paste.filter_factory = nova.api.openstack.v2.auth:AuthMiddleware.factory
 
 [filter:noauth]
-paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
+paste.filter_factory = nova.api.openstack.v2.auth:NoAuthMiddleware.factory
 
 [filter:ratelimit]
-paste.filter_factory = nova.api.openstack.limits:RateLimitingMiddleware.factory
+paste.filter_factory = nova.api.openstack.v2.limits:RateLimitingMiddleware.factory
+
+[filter:serialize]
+paste.filter_factory = nova.api.openstack.wsgi:LazySerializationMiddleware.factory
 
 [filter:extensions]
-paste.filter_factory = nova.api.openstack.extensions:ExtensionMiddleware.factory
+paste.filter_factory = nova.api.openstack.v2.extensions:ExtensionMiddleware.factory
 
-[app:osapiapp10]
-paste.app_factory = nova.api.openstack:APIRouterV10.factory
-
-[app:osapiapp11]
-paste.app_factory = nova.api.openstack:APIRouterV11.factory
+[app:osapi_app_v2]
+paste.app_factory = nova.api.openstack.v2:APIRouter.factory
 
 [pipeline:osversions]
 pipeline = faultwrap osversionapp
 
 [app:osversionapp]
-paste.app_factory = nova.api.openstack.versions:Versions.factory
+paste.app_factory = nova.api.openstack.v2.versions:Versions.factory
 
 ##########
 # Shared #
diff --git a/files/pips/horizon b/files/pips/horizon
index 672fbee..893efb7 100644
--- a/files/pips/horizon
+++ b/files/pips/horizon
@@ -1,9 +1,4 @@
-Django==1.3
-django-nose==0.1.2
-django-mailer
-django-registration==0.7
-python-cloudfiles
-sqlalchemy-migrate
+django-nose-selenium
+pycrypto==2.3
 
 -e git+https://github.com/jacobian/openstack.compute.git#egg=openstack
-
diff --git a/files/pips/openstack-integration-tests b/files/pips/tempest
similarity index 100%
rename from files/pips/openstack-integration-tests
rename to files/pips/tempest
diff --git a/files/screenrc b/files/screenrc
deleted file mode 100644
index e18db39..0000000
--- a/files/screenrc
+++ /dev/null
@@ -1,9 +0,0 @@
-hardstatus on
-hardstatus alwayslastline
-hardstatus string "%{.bW}%-w%{.rW}%n %t%{-}%+w %=%{..G}%H %{..Y}%d/%m %c"
-
-defscrollback 10240
-
-vbell off
-startup_message off
-
diff --git a/files/sudo/nova b/files/sudo/nova
index 62685b3..0a79c21 100644
--- a/files/sudo/nova
+++ b/files/sudo/nova
@@ -1,4 +1,4 @@
-Cmnd_Alias NOVACMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
+Cmnd_Alias NOVADEVCMDS = /bin/chmod /var/lib/nova/tmp/*/root/.ssh, \
                       /bin/chown /var/lib/nova/tmp/*/root/.ssh, \
                       /bin/chown,                               \
                       /bin/chmod,                               \
@@ -43,5 +43,5 @@
                       /usr/sbin/dnsmasq,                        \
                       /usr/sbin/arping
 
-%USER% ALL = (root) NOPASSWD: SETENV: NOVACMDS
+%USER% ALL = (root) NOPASSWD: SETENV: NOVADEVCMDS
 
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
index 1c567c3..da6b1fa 100644
--- a/files/swift/proxy-server.conf
+++ b/files/swift/proxy-server.conf
@@ -19,7 +19,7 @@
 use = egg:swiftkeystone2#keystone2
 keystone_admin_token = %SERVICE_TOKEN%
 keystone_url = http://localhost:35357/v2.0
-keystone_admin_group = Member
+keystone_swift_operator_roles = Member,Admin
 
 [filter:tempauth]
 use = egg:swift#tempauth
diff --git a/openrc b/openrc
index 7c1e129..4395975 100644
--- a/openrc
+++ b/openrc
@@ -3,8 +3,10 @@
 # Load local configuration
 source ./stackrc
 
-# Set api host endpoint
+# Set the api host endpoint (HOST_IP).  SERVICE_HOST may also be used to specify the endpoint,
+# which is convenient for some localrc configurations.
 HOST_IP=${HOST_IP:-127.0.0.1}
+SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
 
 # Nova original used project_id as the *account* that owned resources (servers,
 # ip address, ...)   With the addition of Keystone we have standardized on the
@@ -29,7 +31,7 @@
 #
 # *NOTE*: Using the 2.0 *auth api* does not mean that compute api is 2.0.  We
 # will use the 1.1 *compute api*
-export NOVA_URL=${NOVA_URL:-http://$HOST_IP:5000/v2.0/}
+export NOVA_URL=${NOVA_URL:-http://$SERVICE_HOST:5000/v2.0/}
 
 # Currently novaclient needs you to specify the *compute api* version.  This
 # needs to match the config of your catalog returned by Keystone.
@@ -39,7 +41,7 @@
 export NOVA_REGION_NAME=${NOVA_REGION_NAME:-RegionOne}
 
 # Set the ec2 url so euca2ools works
-export EC2_URL=${EC2_URL:-http://$HOST_IP:8773/services/Cloud}
+export EC2_URL=${EC2_URL:-http://$SERVICE_HOST:8773/services/Cloud}
 
 # Access key is set in the initial keystone data to be the same as username
 export EC2_ACCESS_KEY=${USERNAME:-demo}
diff --git a/stack.sh b/stack.sh
index bc7af65..420fc25 100755
--- a/stack.sh
+++ b/stack.sh
@@ -80,17 +80,22 @@
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
 
-# Configure services to syslog instead of writing to individual log files
-SYSLOG=${SYSLOG:-False}
-
 # apt-get wrapper to just get arguments set correctly
 function apt_get() {
+    [[ "$OFFLINE" = "True" ]] && return
     local sudo="sudo"
     [ "$(id -u)" = "0" ] && sudo="env"
     $sudo DEBIAN_FRONTEND=noninteractive apt-get \
         --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
 }
 
+# Check to see if we are already running a stack.sh
+if screen -ls | egrep -q "[0-9].stack"; then
+    echo "You are already running a stack.sh session."
+    echo "To rejoin this session type 'screen -x stack'."
+    echo "To destroy this session, kill the running screen."
+    exit 1
+fi
 
 # OpenStack is designed to be run as a regular user (Horizon will fail to run
 # as root, since apache refuses to serve content from the root user).  If
@@ -121,7 +126,7 @@
 
     echo "Copying files to stack user"
     STACK_DIR="$DEST/${PWD##*/}"
-    cp -r -f "$PWD" "$STACK_DIR"
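+    # -T copies $PWD onto $STACK_DIR itself rather than into a subdirectory beneath it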
+    cp -r -f -T "$PWD" "$STACK_DIR"
     chown -R stack "$STACK_DIR"
     if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
         exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
@@ -143,12 +148,30 @@
     sudo mv $TEMPFILE /etc/sudoers.d/stack_sh_nova
 fi
 
+# Normalize config values to True or False
+# VAR=`trueorfalse default-value test-value`
+function trueorfalse() {
+    local default=$1
+    local testval=$2
+
+    [[ -z "$testval" ]] && { echo "$default"; return; }
+    [[ "0 no false False FALSE" =~ "$testval" ]] && { echo "False"; return; }
+    [[ "1 yes true True TRUE" =~ "$testval" ]] && { echo "True"; return; }
+    echo "$default"
+}
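+# e.g. `trueorfalse False yes` echoes "True" and `trueorfalse False junk` echoes the default "False"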
+
+# Set True to configure stack.sh to run cleanly without Internet access.
+# stack.sh must have been previously run with Internet access to install
+# prerequisites and initialize $DEST.
+OFFLINE=`trueorfalse False $OFFLINE`
+
 # Set the destination directories for openstack projects
 NOVA_DIR=$DEST/nova
 HORIZON_DIR=$DEST/horizon
 GLANCE_DIR=$DEST/glance
 KEYSTONE_DIR=$DEST/keystone
 NOVACLIENT_DIR=$DEST/python-novaclient
+KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
 OPENSTACKX_DIR=$DEST/openstackx
 NOVNC_DIR=$DEST/noVNC
 SWIFT_DIR=$DEST/swift
@@ -157,12 +180,18 @@
 
 # Default Quantum Plugin
 Q_PLUGIN=${Q_PLUGIN:-openvswitch}
+# Default Quantum Port
+Q_PORT=${Q_PORT:-9696}
+# Default Quantum Host
+Q_HOST=${Q_HOST:-localhost}
 
 # Specify which services to launch.  These generally correspond to screen tabs
 ENABLED_SERVICES=${ENABLED_SERVICES:-g-api,g-reg,key,n-api,n-cpu,n-net,n-sch,n-vnc,horizon,mysql,rabbit,openstackx}
 
 # Name of the lvm volume group to use/create for iscsi volumes
 VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  Stack.sh can
@@ -186,6 +215,14 @@
     fi
 fi
 
+# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
+SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
+
+# Configure services to syslog instead of writing to individual log files
+SYSLOG=`trueorfalse False $SYSLOG`
+SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
+SYSLOG_PORT=${SYSLOG_PORT:-516}
+
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
@@ -210,12 +247,17 @@
         echo '################################################################################'
         echo $msg
         echo '################################################################################'
-        echo "This value will be written to your localrc file so you don't have to enter it again."
-        echo "It is probably best to avoid spaces and weird characters."
+        echo "This value will be written to your localrc file so you don't have to enter it "
+        echo "again.  Use only alphanumeric characters."
         echo "If you leave this blank, a random default value will be used."
-        echo "Enter a password now:"
-        read $var
-        pw=${!var}
+        pw=" "
+        while true; do
+            echo "Enter a password now:"
+            read -e $var
+            pw=${!var}
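+            # accept only if stripping non-alphanumeric chars leaves the password unchanged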
+            [[ "$pw" = "`echo $pw | tr -cd [:alnum:]`" ]] && break
+            echo "Invalid chars in password.  Try again:"
+        done
         if [ ! $pw ]; then
             pw=`openssl rand -hex 10`
         fi
@@ -237,7 +279,7 @@
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
 NET_MAN=${NET_MAN:-FlatDHCPManager}
-EC2_DMZ_HOST=${EC2_DMZ_HOST:-$HOST_IP}
+EC2_DMZ_HOST=${EC2_DMZ_HOST:-$SERVICE_HOST}
 FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-br100}
 VLAN_INTERFACE=${VLAN_INTERFACE:-$PUBLIC_INTERFACE}
 
@@ -266,8 +308,9 @@
 
 # Using Quantum networking:
 #
-# Make sure that q-svc is enabled in ENABLED_SERVICES.  If it is the network
-# manager will be set to the QuantumManager.
+# Make sure that quantum is enabled in ENABLED_SERVICES.  If it is the network
+# manager will be set to the QuantumManager.  If you want to run Quantum on
+# this host, make sure that q-svc is also in ENABLED_SERVICES.
 #
 # If you're planning to use the Quantum openvswitch plugin, set Q_PLUGIN to
 # "openvswitch" and make sure the q-agt service is enabled in
@@ -299,7 +342,7 @@
 read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 
 # Glance connection info.  Note the port must be specified.
-GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$HOST_IP:9292}
+GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
 
 # SWIFT
 # -----
@@ -378,14 +421,18 @@
 # - We are parsing the packages files and detecting metadatas.
 #  - If there is a NOPRIME as comment mean we are not doing the install
 #    just yet.
-#  - If we have the meta-keyword distro:DISTRO or
-#    distro:DISTRO1,DISTRO2 it will be installed only for those
+#  - If we have the meta-keyword dist:DISTRO or
+#    dist:DISTRO1,DISTRO2 it will be installed only for those
 #    distros (case insensitive).
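+#  - Example from files/apts/nova: "arping # only available in dist:natty"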
 function get_packages() {
     local file_to_parse="general"
     local service
 
     for service in ${ENABLED_SERVICES//,/ }; do
+        # Allow individual services to specify dependencies
+        if [[ -e $FILES/apts/${service} ]]; then
+            file_to_parse="${file_to_parse} $service"
+        fi
         if [[ $service == n-* ]]; then
             if [[ ! $file_to_parse =~ nova ]]; then
                 file_to_parse="${file_to_parse} nova"
@@ -398,8 +445,6 @@
             if [[ ! $file_to_parse =~ keystone ]]; then
                 file_to_parse="${file_to_parse} keystone"
             fi
-        elif [[ -e $FILES/apts/${service} ]]; then
-            file_to_parse="${file_to_parse} $service"
         fi
     done
 
@@ -430,42 +475,57 @@
     done
 }
 
+function pip_install {
+    [[ "$OFFLINE" = "True" ]] && return
+    sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors $@
+}
+
 # install apt requirements
 apt_get update
 apt_get install $(get_packages)
 
 # install python requirements
-sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $FILES/pips/*`
+pip_install `cat $FILES/pips/* | uniq`
 
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
 # be owned by the installation user, we create the directory and change the
 # ownership to the proper user.
 function git_clone {
+    [[ "$OFFLINE" = "True" ]] && return
 
     GIT_REMOTE=$1
     GIT_DEST=$2
     GIT_BRANCH=$3
 
-    # do a full clone only if the directory doesn't exist
-    if [ ! -d $GIT_DEST ]; then
-        git clone $GIT_REMOTE $GIT_DEST
-        cd $2
-        # This checkout syntax works for both branches and tags
-        git checkout $GIT_BRANCH
-    elif [[ "$RECLONE" == "yes" ]]; then
-        # if it does exist then simulate what clone does if asked to RECLONE
+    if echo $GIT_BRANCH | egrep -q "^refs"; then
+        # If our branch name is a gerrit style refs/changes/...
+        if [ ! -d $GIT_DEST ]; then
+            git clone $GIT_REMOTE $GIT_DEST
+        fi
         cd $GIT_DEST
-        # set the url to pull from and fetch
-        git remote set-url origin $GIT_REMOTE
-        git fetch origin
-        # remove the existing ignored files (like pyc) as they cause breakage
-        # (due to the py files having older timestamps than our pyc, so python
-        # thinks the pyc files are correct using them)
-        find $GIT_DEST -name '*.pyc' -delete
-        git checkout -f origin/$GIT_BRANCH
-        # a local branch might not exist
-        git branch -D $GIT_BRANCH || true
-        git checkout -b $GIT_BRANCH
+        git fetch $GIT_REMOTE $GIT_BRANCH && git checkout FETCH_HEAD
+    else
+        # do a full clone only if the directory doesn't exist
+        if [ ! -d $GIT_DEST ]; then
+            git clone $GIT_REMOTE $GIT_DEST
+            cd $GIT_DEST
+            # This checkout syntax works for both branches and tags
+            git checkout $GIT_BRANCH
+        elif [[ "$RECLONE" == "yes" ]]; then
+            # if it does exist then simulate what clone does if asked to RECLONE
+            cd $GIT_DEST
+            # set the url to pull from and fetch
+            git remote set-url origin $GIT_REMOTE
+            git fetch origin
+            # remove the existing ignored files (like pyc) as they cause breakage
+            # (due to the py files having older timestamps than our pyc, so python
+            # thinks the pyc files are correct using them)
+            find $GIT_DEST -name '*.pyc' -delete
+            git checkout -f origin/$GIT_BRANCH
+            # a local branch might not exist
+            git branch -D $GIT_BRANCH || true
+            git checkout -b $GIT_BRANCH
+        fi
     fi
 }
 
@@ -475,9 +535,9 @@
 git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
 
 # glance, swift middleware and nova api needs keystone middleware
-if [[ "$ENABLED_SERVICES" =~ "key" || 
-      "$ENABLED_SERVICES" =~ "g-api" || 
-      "$ENABLED_SERVICES" =~ "n-api" || 
+if [[ "$ENABLED_SERVICES" =~ "key" ||
+      "$ENABLED_SERVICES" =~ "g-api" ||
+      "$ENABLED_SERVICES" =~ "n-api" ||
       "$ENABLED_SERVICES" =~ "swift" ]]; then
     # unified auth system (manages accounts/tokens)
     git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
@@ -500,13 +560,14 @@
 if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
     # django powered web control panel for openstack
     git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
+    git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
 fi
 if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then
     # openstackx is a collection of extensions to openstack.compute & nova
     # that is *deprecated*.  The code is being moved into python-novaclient & nova.
     git_clone $OPENSTACKX_REPO $OPENSTACKX_DIR $OPENSTACKX_BRANCH
 fi
-if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
     # quantum
     git_clone $QUANTUM_REPO $QUANTUM_DIR $QUANTUM_BRANCH
 fi
@@ -517,9 +578,9 @@
 
 # setup our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
-if [[ "$ENABLED_SERVICES" =~ "key" || 
-      "$ENABLED_SERVICES" =~ "g-api" || 
-      "$ENABLED_SERVICES" =~ "n-api" || 
+if [[ "$ENABLED_SERVICES" =~ "key" ||
+      "$ENABLED_SERVICES" =~ "g-api" ||
+      "$ENABLED_SERVICES" =~ "n-api" ||
       "$ENABLED_SERVICES" =~ "swift" ]]; then
     cd $KEYSTONE_DIR; sudo python setup.py develop
 fi
@@ -537,16 +598,35 @@
     cd $OPENSTACKX_DIR; sudo python setup.py develop
 fi
 if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
-    cd $HORIZON_DIR/django-openstack; sudo python setup.py develop
+    cd $KEYSTONECLIENT_DIR; sudo python setup.py develop
+    cd $HORIZON_DIR/horizon; sudo python setup.py develop
     cd $HORIZON_DIR/openstack-dashboard; sudo python setup.py develop
 fi
-if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
     cd $QUANTUM_DIR; sudo python setup.py develop
 fi
 
-# Add a useful screenrc.  This isn't required to run openstack but is we do
-# it since we are going to run the services in screen for simple
-cp $FILES/screenrc ~/.screenrc
+# Syslog
+# ---------
+
+if [[ $SYSLOG != "False" ]]; then
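+    # rsyslog-relp supplies the RELP modules (imrelp/omrelp) used below for reliable delivery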
+    apt_get install -y rsyslog-relp
+    if [[ "$SYSLOG_HOST" = "$HOST_IP" ]]; then
+        # Configure the master host to receive
+        cat <<EOF >/tmp/90-stack-m.conf
+\$ModLoad imrelp
+\$InputRELPServerRun $SYSLOG_PORT
+EOF
+        sudo mv /tmp/90-stack-m.conf /etc/rsyslog.d
+    else
+        # Set rsyslog to send to remote host
+        cat <<EOF >/tmp/90-stack-s.conf
+*.*		:omrelp:$SYSLOG_HOST:$SYSLOG_PORT
+EOF
+        sudo mv /tmp/90-stack-s.conf /etc/rsyslog.d
+    fi
+    sudo /usr/sbin/service rsyslog restart
+fi
 
 # Rabbit
 # ---------
@@ -618,12 +698,18 @@
 
 
     # ``local_settings.py`` is used to override horizon default settings.
-    cp $FILES/horizon_settings.py $HORIZON_DIR/openstack-dashboard/local/local_settings.py
+    local_settings=$HORIZON_DIR/openstack-dashboard/local/local_settings.py
+    cp $FILES/horizon_settings.py $local_settings
+
+    # Enable quantum in dashboard, if requested
+    if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
+        sudo sed -e "s,QUANTUM_ENABLED = False,QUANTUM_ENABLED = True,g" -i $local_settings
+    fi
 
     # Initialize the horizon database (it stores sessions and notices shown to
     # users).  The user system is external (keystone).
     cd $HORIZON_DIR/openstack-dashboard
-    dashboard/manage.py syncdb
+    python manage.py syncdb
 
     # create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
@@ -678,10 +764,23 @@
     sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $NOVA_DIR/bin/nova-api-paste.ini
 fi
 
+# Helper to clean iptables rules
+function clean_iptables() {
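+    # "iptables -S" prints rules as "-A ..." commands; sed rewrites -A to -D (and -N to -X
+    # for chains) to build the matching delete commands, which are then piped through bash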
+    # Delete rules
+    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-A" |  sed "s/-A/-D/g" | awk '{print "sudo iptables",$0}' | bash
+    # Delete nat rules
+    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" |  grep "\-A" | sed "s/-A/-D/g" | awk '{print "sudo iptables -t nat",$0}' | bash
+    # Delete chains
+    sudo iptables -S -v | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" | grep "\-N" |  sed "s/-N/-X/g" | awk '{print "sudo iptables",$0}' | bash
+    # Delete nat chains
+    sudo iptables -S -v -t nat | sed "s/-c [0-9]* [0-9]* //g" | grep "nova" |  grep "\-N" | sed "s/-N/-X/g" | awk '{print "sudo iptables -t nat",$0}' | bash
+}
+
 if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
 
     # Virtualization Configuration
     # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    apt_get install libvirt-bin
 
     # attempt to load modules: network block device - used to manage qcow images
     sudo modprobe nbd || true
@@ -690,7 +789,6 @@
     # kvm, we drop back to the slower emulation mode (qemu).  Note: many systems
     # come with hardware virtualization disabled in BIOS.
     if [[ "$LIBVIRT_TYPE" == "kvm" ]]; then
-        apt_get install libvirt-bin
         sudo modprobe kvm || true
         if [ ! -e /dev/kvm ]; then
             echo "WARNING: Switching to QEMU"
@@ -702,15 +800,17 @@
     # splitting a system into many smaller parts.  LXC uses cgroups and chroot
     # to simulate multiple systems.
     if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
-        apt_get install lxc
-        # lxc uses cgroups (a kernel interface via virtual filesystem) configured
-        # and mounted to ``/cgroup``
-        sudo mkdir -p /cgroup
-        if ! grep -q cgroup /etc/fstab; then
-            echo none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0 | sudo tee -a /etc/fstab
-        fi
-        if ! mount -n | grep -q cgroup; then
-            sudo mount /cgroup
+        if [[ "$DISTRO" > natty ]]; then
+            apt_get install cgroup-lite
+        else
+            cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
+            sudo mkdir -p /cgroup
+            if ! grep -q cgroup /etc/fstab; then
+                echo "$cgline" | sudo tee -a /etc/fstab
+            fi
+            if ! mount -n | grep -q cgroup; then
+                sudo mount /cgroup
+            fi
         fi
     fi
 
@@ -739,13 +839,24 @@
         fi
     fi
 
+    # Clean iptables from previous runs
+    clean_iptables
+
+    # Destroy old instances
+    instances=`virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+    if [ -n "$instances" ]; then
+        echo $instances | xargs -n1 virsh destroy || true
+        echo $instances | xargs -n1 virsh undefine || true
+    fi
+
     # Clean out the instances directory.
     sudo rm -rf $NOVA_DIR/instances/*
 fi
 
 if [[ "$ENABLED_SERVICES" =~ "n-net" ]]; then
-    # delete traces of nova networks from prior runs
+    # Delete traces of nova networks from prior runs
     sudo killall dnsmasq || true
+    clean_iptables
     rm -rf $NOVA_DIR/networks
     mkdir -p $NOVA_DIR/networks
 fi
@@ -904,12 +1015,29 @@
 
     apt_get install iscsitarget-dkms iscsitarget
 
-    if ! sudo vgdisplay | grep -q $VOLUME_GROUP; then
+    if ! sudo vgs $VOLUME_GROUP; then
         VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
-        truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+        # Only create if the file doesn't already exist
+        [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
         DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
-        sudo vgcreate $VOLUME_GROUP $DEV
+        # Only create if the loopback device doesn't contain $VOLUME_GROUP
+        if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
+    fi
+
+    if sudo vgs $VOLUME_GROUP; then
+        # Clean out existing volumes
+        for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
+            # VOLUME_NAME_PREFIX prefixes the LVs we want
+            if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
+                tid=`egrep "^tid.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '='`
+                if [[ -n "$tid" ]]; then
+                    lun=`egrep "lun.+$lv" /proc/net/iet/volume | cut -f1 -d' ' | tr ':' '=' | tr -d '\t'`
+                    sudo ietadm --op delete --$tid --$lun
+                fi
+                sudo lvremove -f $VOLUME_GROUP/$lv
+            fi
+        done
     fi
 
     # Configure iscsitarget
@@ -928,28 +1056,36 @@
 add_nova_flag "--scheduler_driver=$SCHEDULER"
 add_nova_flag "--dhcpbridge_flagfile=$NOVA_DIR/bin/nova.conf"
 add_nova_flag "--fixed_range=$FIXED_RANGE"
-if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
     add_nova_flag "--network_manager=nova.network.quantum.manager.QuantumManager"
-    if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+    add_nova_flag "--quantum_connection_host=$Q_HOST"
+    add_nova_flag "--quantum_connection_port=$Q_PORT"
+    if [[ "$ENABLED_SERVICES" =~ "q-svc" && "$Q_PLUGIN" = "openvswitch" ]]; then
         add_nova_flag "--libvirt_vif_type=ethernet"
         add_nova_flag "--libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"
+        add_nova_flag "--linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver"
+        add_nova_flag "--quantum-use-dhcp"
     fi
 else
     add_nova_flag "--network_manager=nova.network.manager.$NET_MAN"
 fi
 if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
     add_nova_flag "--volume_group=$VOLUME_GROUP"
+    add_nova_flag "--volume_name_template=${VOLUME_NAME_PREFIX}%08x"
 fi
 add_nova_flag "--my_ip=$HOST_IP"
 add_nova_flag "--public_interface=$PUBLIC_INTERFACE"
 add_nova_flag "--vlan_interface=$VLAN_INTERFACE"
 add_nova_flag "--sql_connection=$BASE_SQL_CONN/nova"
 add_nova_flag "--libvirt_type=$LIBVIRT_TYPE"
+add_nova_flag "--instance_name_template=${INSTANCE_NAME_PREFIX}%08x"
 if [[ "$ENABLED_SERVICES" =~ "openstackx" ]]; then
-    add_nova_flag "--osapi_extensions_path=$OPENSTACKX_DIR/extensions"
+    add_nova_flag "--osapi_extension=nova.api.openstack.v2.contrib.standard_extensions"
+    add_nova_flag "--osapi_extension=extensions.admin.Admin"
 fi
 if [[ "$ENABLED_SERVICES" =~ "n-vnc" ]]; then
-    add_nova_flag "--vncproxy_url=http://$HOST_IP:6080"
+    VNCPROXY_URL=${VNCPROXY_URL:-"http://$SERVICE_HOST:6080"}
+    add_nova_flag "--vncproxy_url=$VNCPROXY_URL"
     add_nova_flag "--vncproxy_wwwroot=$NOVNC_DIR/"
 fi
 add_nova_flag "--api_paste_config=$NOVA_DIR/bin/nova-api-paste.ini"
@@ -973,7 +1109,7 @@
 # You can define extra nova conf flags by defining the array EXTRA_FLAGS,
 # For Example: EXTRA_FLAGS=(--foo --bar=2)
 for I in "${EXTRA_FLAGS[@]}"; do
-    add_nova_flag $i
+    add_nova_flag $I
 done
 
 # XenServer
@@ -1020,7 +1156,7 @@
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone;'
 
-    # FIXME (anthony) keystone should use keystone.conf.example
+    # Configure keystone.conf
     KEYSTONE_CONF=$KEYSTONE_DIR/etc/keystone.conf
     cp $FILES/keystone.conf $KEYSTONE_CONF
     sudo sed -e "s,%SQL_CONN%,$BASE_SQL_CONN/keystone,g" -i $KEYSTONE_CONF
@@ -1029,11 +1165,19 @@
     # keystone_data.sh creates our admin user and our ``SERVICE_TOKEN``.
     KEYSTONE_DATA=$KEYSTONE_DIR/bin/keystone_data.sh
     cp $FILES/keystone_data.sh $KEYSTONE_DATA
-    sudo sed -e "s,%HOST_IP%,$HOST_IP,g" -i $KEYSTONE_DATA
+    sudo sed -e "s,%SERVICE_HOST%,$SERVICE_HOST,g" -i $KEYSTONE_DATA
     sudo sed -e "s,%SERVICE_TOKEN%,$SERVICE_TOKEN,g" -i $KEYSTONE_DATA
     sudo sed -e "s,%ADMIN_PASSWORD%,$ADMIN_PASSWORD,g" -i $KEYSTONE_DATA
     # initialize keystone with default users/endpoints
-    BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA
+    ENABLED_SERVICES=$ENABLED_SERVICES BIN_DIR=$KEYSTONE_DIR/bin bash $KEYSTONE_DATA
+
+    if [ "$SYSLOG" != "False" ]; then
+        sed -i -e '/^handlers=devel$/s/=devel/=production/' \
+            $KEYSTONE_DIR/etc/logging.cnf
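+        # Comment out keystone's log_file so logging.cnf controls the output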
+        sed -i -e "/^log_file/s/log_file/\#log_file/" \
+            $KEYSTONE_DIR/etc/keystone.conf
+        KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_DIR/etc/logging.cnf"
+    fi
 fi
 
 
@@ -1065,6 +1209,8 @@
 # create a new named screen to run processes in
 screen -d -m -S stack -t stack
 sleep 1
+# set a reasonable statusbar
+screen -r stack -X hardstatus alwayslastline "%-Lw%{= BW}%50>%n%f* %t%{-}%+Lw%< %= %H"
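+# (%n and %t expand to the current window's number and title, %H to the hostname)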
 
 # launch the glance registry service
 if [[ "$ENABLED_SERVICES" =~ "g-reg" ]]; then
@@ -1083,7 +1229,7 @@
 
 # launch the keystone and wait for it to answer before continuing
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF -d"
+    screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d"
     echo "Waiting for keystone to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget -q -O- http://127.0.0.1:5000; do sleep 1; done"; then
       echo "keystone did not start"
@@ -1101,26 +1247,24 @@
     fi
 fi
 
-# Quantum
+# Quantum service
 if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-    # Install deps
-    # FIXME add to files/apts/quantum, but don't install if not needed!
-    apt_get install openvswitch-switch openvswitch-datapath-dkms
-
-    # Create database for the plugin/agent
     if [[ "$Q_PLUGIN" = "openvswitch" ]]; then
+        # Install deps
+        # FIXME add to files/apts/quantum, but don't install if not needed!
+        apt_get install openvswitch-switch openvswitch-datapath-dkms
+        # Create database for the plugin/agent
         if [[ "$ENABLED_SERVICES" =~ "mysql" ]]; then
             mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE IF NOT EXISTS ovs_quantum;'
         else
             echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
             exit 1
         fi
+        QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/etc/plugins.ini
+        # Make sure we're using the openvswitch plugin
+        sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
     fi
-
-    QUANTUM_PLUGIN_INI_FILE=$QUANTUM_DIR/quantum/plugins.ini
-    # Make sure we're using the openvswitch plugin
-    sed -i -e "s/^provider =.*$/provider = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPlugin/g" $QUANTUM_PLUGIN_INI_FILE
-    screen_it q-svc "cd $QUANTUM_DIR && export PYTHONPATH=.:$PYTHONPATH; python $QUANTUM_DIR/bin/quantum $QUANTUM_DIR/etc/quantum.conf"
+    screen_it q-svc "cd $QUANTUM_DIR && PYTHONPATH=.:$PYTHONPATH python $QUANTUM_DIR/bin/quantum-server $QUANTUM_DIR/etc/quantum.conf"
 fi
 
 # Quantum agent (for compute nodes)
@@ -1134,7 +1278,7 @@
     fi
 
     # Start up the quantum <-> openvswitch agent
-    screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v"
+    screen_it q-agt "sleep 4; sudo python $QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py $QUANTUM_DIR/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini -v"
 fi
 
 # If we're using Quantum (i.e. q-svc is enabled), network creation has to
@@ -1204,20 +1348,55 @@
     for image_url in ${IMAGE_URLS//,/ }; do
         # Downloads the image (uec ami+aki style), then extracts it.
         IMAGE_FNAME=`basename "$image_url"`
-        IMAGE_NAME=`basename "$IMAGE_FNAME" .tar.gz`
         if [ ! -f $FILES/$IMAGE_FNAME ]; then
             wget -c $image_url -O $FILES/$IMAGE_FNAME
         fi
 
-        # Extract ami and aki files
-        tar -zxf $FILES/$IMAGE_FNAME -C $FILES/images
+        KERNEL=""
+        RAMDISK=""
+        case "$IMAGE_FNAME" in
+            *.tar.gz|*.tgz)
+                # Extract ami and aki files
+                [ "${IMAGE_FNAME%.tar.gz}" != "$IMAGE_FNAME" ] &&
+                    IMAGE_NAME="${IMAGE_FNAME%.tar.gz}" ||
+                    IMAGE_NAME="${IMAGE_FNAME%.tgz}"
+                xdir="$FILES/images/$IMAGE_NAME"
+                rm -Rf "$xdir";
+                mkdir "$xdir"
+                tar -zxf $FILES/$IMAGE_FNAME -C "$xdir"
+                KERNEL=$(for f in "$xdir/"*-vmlinuz*; do
+                         [ -f "$f" ] && echo "$f" && break; done; true)
+                RAMDISK=$(for f in "$xdir/"*-initrd*; do
+                         [ -f "$f" ] && echo "$f" && break; done; true)
+                IMAGE=$(for f in "$xdir/"*.img; do
+                         [ -f "$f" ] && echo "$f" && break; done; true)
+                [ -n "$IMAGE_NAME" ]
+                IMAGE_NAME=$(basename "$IMAGE" ".img")
+                ;;
+            *.img)
+                IMAGE="$FILES/$IMAGE_FNAME";
+                IMAGE_NAME=$(basename "$IMAGE" ".img")
+                ;;
+            *.img.gz)
+                IMAGE="$FILES/${IMAGE_FNAME}"
+                IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
+                ;;
+            *) echo "Do not know what to do with $IMAGE_FNAME"; false;;
+        esac
 
         # Use glance client to add the kernel the root filesystem.
         # We parse the results of the first upload to get the glance ID of the
         # kernel for use when uploading the root filesystem.
-        RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < $FILES/images/$IMAGE_NAME-vmlinuz*`
-        KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
-        glance add -A $SERVICE_TOKEN name="$IMAGE_NAME" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID < $FILES/images/$IMAGE_NAME.img
+        KERNEL_ID=""; RAMDISK_ID="";
+        if [ -n "$KERNEL" ]; then
+            RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-kernel" is_public=true container_format=aki disk_format=aki < "$KERNEL"`
+            KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+        fi
+        if [ -n "$RAMDISK" ]; then
+            RVAL=`glance add -A $SERVICE_TOKEN name="$IMAGE_NAME-ramdisk" is_public=true container_format=ari disk_format=ari < "$RAMDISK"`
+            RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
+        fi
+        glance add -A $SERVICE_TOKEN name="${IMAGE_NAME%.img}" is_public=true container_format=ami disk_format=ami ${KERNEL_ID:+kernel_id=$KERNEL_ID} ${RAMDISK_ID:+ramdisk_id=$RAMDISK_ID} < <(zcat --force "${IMAGE}")
     done
 fi
 
@@ -1241,18 +1420,21 @@
 # If you installed the horizon on this server, then you should be able
 # to access the site using your browser.
 if [[ "$ENABLED_SERVICES" =~ "horizon" ]]; then
-    echo "horizon is now available at http://$HOST_IP/"
+    echo "horizon is now available at http://$SERVICE_HOST/"
 fi
 
 # If keystone is present, you can point nova cli to this server
 if [[ "$ENABLED_SERVICES" =~ "key" ]]; then
-    echo "keystone is serving at http://$HOST_IP:5000/v2.0/"
+    echo "keystone is serving at http://$SERVICE_HOST:5000/v2.0/"
     echo "examples on using novaclient command line is in exercise.sh"
     echo "the default users are: admin and demo"
     echo "the password: $ADMIN_PASSWORD"
 fi
 
-# indicate how long this took to run (bash maintained variable 'SECONDS')
+# Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address
+echo "This is your host ip: $HOST_IP"
+
+# Indicate how long this took to run (bash maintained variable 'SECONDS')
 echo "stack.sh completed in $SECONDS seconds."
 
 ) | tee -a "$LOGFILE"
diff --git a/stackrc b/stackrc
index 6a56a2a..9bc3be6 100644
--- a/stackrc
+++ b/stackrc
@@ -1,10 +1,10 @@
 # compute service
 NOVA_REPO=https://github.com/openstack/nova.git
-NOVA_BRANCH=stable/diablo
+NOVA_BRANCH=master
 
 # storage service
 SWIFT_REPO=https://github.com/openstack/swift.git
-SWIFT_BRANCH=stable/diablo
+SWIFT_BRANCH=master
 
 # swift and keystone integration
 SWIFT_KEYSTONE_REPO=https://github.com/cloudbuilders/swift-keystone2.git
@@ -12,7 +12,7 @@
 
 # image catalog service
 GLANCE_REPO=https://github.com/openstack/glance.git
-GLANCE_BRANCH=stable/diablo
+GLANCE_BRANCH=master
 
 # unified auth system (manages accounts/tokens)
 KEYSTONE_REPO=https://github.com/openstack/keystone.git
@@ -20,31 +20,54 @@
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=https://github.com/cloudbuilders/noVNC.git
-NOVNC_BRANCH=diablo
+NOVNC_BRANCH=master
 
 # django powered web control panel for openstack
 HORIZON_REPO=https://github.com/openstack/horizon.git
-HORIZON_BRANCH=stable/diablo
+HORIZON_BRANCH=master
 
 # python client library to nova that horizon (and others) use
 NOVACLIENT_REPO=https://github.com/openstack/python-novaclient.git
 NOVACLIENT_BRANCH=master
 
+# python client library for keystone that horizon uses
+KEYSTONECLIENT_REPO=https://github.com/openstack/python-keystoneclient
+KEYSTONECLIENT_BRANCH=master
+
 # openstackx is a collection of extensions to openstack.compute & nova
 # that is *deprecated*.  The code is being moved into python-novaclient & nova.
 OPENSTACKX_REPO=https://github.com/cloudbuilders/openstackx.git
-OPENSTACKX_BRANCH=diablo
+OPENSTACKX_BRANCH=master
 
 # quantum service
 QUANTUM_REPO=https://github.com/openstack/quantum
-QUANTUM_BRANCH=stable/diablo
+QUANTUM_BRANCH=master
 
 # CI test suite
-CITEST_REPO=https://github.com/openstack/openstack-integration-tests.git
+CITEST_REPO=https://github.com/openstack/tempest.git
 CITEST_BRANCH=master
 
 # Specify a comma-separated list of uec images to download and install into glance.
-IMAGE_URLS=http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz
+# Supported URLs here are:
+#  * "uec-style" images:
+#     If the file ends in .tar.gz, uncompress the tarball and select the first
+#     .img file inside it as the image.  If present, use "*-vmlinuz*" as the kernel
+#     and "*-initrd*" as the ramdisk.
+#     example: http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz
+#  * disk images (*.img, *.img.gz)
+#    If the file ends in .img, it will be uploaded to glance and registered
+#    as a disk image.  If it ends in .gz, it is uncompressed first.
+#    examples:
+#      http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
+#      http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
+#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
+#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
+case "$LIBVIRT_TYPE" in
+    lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
+        IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";;
+    *)  # otherwise, use the uec style image (with kernel, ramdisk, disk)
+        IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";;
+esac
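+# To use different images, override IMAGE_URLS in your localrc, e.g.:
+#   IMAGE_URLS="http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-amd64.tar.gz"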
 
 # allow local overrides of env variables
 if [ -f ./localrc ]; then
diff --git a/tools/build_ci_config.sh b/tools/build_ci_config.sh
index 8eed8ec..90b8abf 100755
--- a/tools/build_ci_config.sh
+++ b/tools/build_ci_config.sh
@@ -1,23 +1,19 @@
 #!/usr/bin/env bash
 #
-# build_ci_config.sh - Build a config.ini for openstack-integration-tests
-#                      (https://github.com/openstack/openstack-integration-tests)
+# build_ci_config.sh - Build a config.ini for tempest (openstack-integration-tests)
+#                      (https://github.com/openstack/tempest.git)
 
 function usage {
-    echo "$0 - Build config.ini for openstack-integration-tests"
+    echo "$0 - Build config.ini for tempest"
     echo ""
-    echo "Usage: $0 configdir"
+    echo "Usage: $0 [configdir]"
     exit 1
 }
 
-if [ ! "$#" -eq "1" ]; then
+if [ "$1" = "-h" ]; then
     usage
 fi
 
-CONFIG_DIR=$1
-CONFIG_CONF=$CONFIG_DIR/storm.conf
-CONFIG_INI=$CONFIG_DIR/config.ini
-
 # Clean up any resources that may be in use
 cleanup() {
     set +o errexit
@@ -53,8 +49,51 @@
 # Where Openstack code lives
 DEST=${DEST:-/opt/stack}
 
+CITEST_DIR=$DEST/tempest
+
+CONFIG_DIR=${1:-$CITEST_DIR/etc}
+CONFIG_CONF=$CONFIG_DIR/storm.conf
+CONFIG_INI=$CONFIG_DIR/config.ini
+
 DIST_NAME=${DIST_NAME:-oneiric}
 
+# git clone only if the directory doesn't already exist; if RECLONE=yes is
+# set, refresh an existing checkout instead
+function git_clone {
+
+    GIT_REMOTE=$1
+    GIT_DEST=$2
+    GIT_BRANCH=$3
+
+    # do a full clone only if the directory doesn't exist
+    if [ ! -d $GIT_DEST ]; then
+        git clone $GIT_REMOTE $GIT_DEST
+        cd $GIT_DEST
+        # This checkout syntax works for both branches and tags
+        git checkout $GIT_BRANCH
+    elif [[ "$RECLONE" == "yes" ]]; then
+        # if it does exist then simulate what clone does if asked to RECLONE
+        cd $GIT_DEST
+        # set the url to pull from and fetch
+        git remote set-url origin $GIT_REMOTE
+        git fetch origin
+        # remove existing ignored files (like *.pyc) as they can cause breakage:
+        # the checked-out .py files may be older than our .pyc files, so python
+        # would keep using the stale .pyc
+        find $GIT_DEST -name '*.pyc' -delete
+        git checkout -f origin/$GIT_BRANCH
+        # a local branch might not exist
+        git branch -D $GIT_BRANCH || true
+        git checkout -b $GIT_BRANCH
+    fi
+}
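+# e.g. refresh an existing checkout with: RECLONE=yes ./build_ci_config.sh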
+
+# Install tests and prerequisites
+sudo PIP_DOWNLOAD_CACHE=/var/cache/pip pip install --use-mirrors `cat $TOP_DIR/files/pips/tempest`
+
+git_clone $CITEST_REPO $CITEST_DIR $CITEST_BRANCH
+
 if [ ! -f $DEST/.ramdisk ]; then
     # Process network configuration vars
     GUEST_NETWORK=${GUEST_NETWORK:-1}
@@ -160,7 +199,7 @@
 apiver = v2.0
 user = admin
 password = $ADMIN_PASSWORD
-tenant_id = 1
+tenant_name = admin
 
 [nova]
 host = $HOST_IP
diff --git a/tools/build_uec.sh b/tools/build_uec.sh
index 8167105..04e1a45 100755
--- a/tools/build_uec.sh
+++ b/tools/build_uec.sh
@@ -185,17 +185,6 @@
 sed -i "s/127.0.0.1/127.0.0.1 \`hostname\`/" /etc/hosts
 apt-get update
 apt-get install git sudo -y
-if [ ! -d devstack ]; then
-    git clone https://github.com/cloudbuilders/devstack.git
-    cd devstack
-    git remote set-url origin `cd $TOP_DIR; git remote show origin | grep Fetch | awk '{print $3}'`
-    git fetch
-    git checkout `git rev-parse HEAD`
-    cat > localrc <<LOCAL_EOF
-ROOTSLEEP=0
-`cat $TOP_DIR/localrc`
-LOCAL_EOF
-fi
 # Disable byobu
 sudo apt-get remove -y byobu
 EOF
@@ -205,6 +194,14 @@
     PUB_KEY=`cat  ~/.ssh/id_rsa.pub`
     cat >> $vm_dir/uec/user-data<<EOF
 mkdir -p /opt/stack
+if [ ! -d /opt/stack/devstack ]; then
+    git clone https://github.com/cloudbuilders/devstack.git /opt/stack/devstack
+    cd /opt/stack/devstack
+    cat > localrc <<LOCAL_EOF
+ROOTSLEEP=0
+`cat $TOP_DIR/localrc`
+LOCAL_EOF
+fi
 useradd -U -G sudo -s /bin/bash -d /opt/stack -m stack
 echo stack:pass | chpasswd
 mkdir -p /opt/stack/.ssh
@@ -222,7 +219,7 @@
 
 # Run stack.sh
 cat >> $vm_dir/uec/user-data<<EOF
-./stack.sh
+su -c "cd /opt/stack/devstack && ./stack.sh" stack
 EOF
 
 # (re)start a metadata service
diff --git a/tools/build_uec_ramdisk.sh b/tools/build_uec_ramdisk.sh
index 0c27734..3bd704b 100755
--- a/tools/build_uec_ramdisk.sh
+++ b/tools/build_uec_ramdisk.sh
@@ -149,7 +149,7 @@
 git_clone $HORIZON_REPO $DEST/horizon $HORIZON_BRANCH
 git_clone $NOVACLIENT_REPO $DEST/python-novaclient $NOVACLIENT_BRANCH
 git_clone $OPENSTACKX_REPO $DEST/openstackx $OPENSTACKX_BRANCH
-git_clone $CITEST_REPO $DEST/openstack-integration-tests $CITEST_BRANCH
+git_clone $CITEST_REPO $DEST/tempest $CITEST_BRANCH
 
 # Use this version of devstack
 rm -rf $MNT_DIR/$DEST/devstack
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index 7fa920e..f66f2bc 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -10,6 +10,7 @@
 
 # exit on error to stop unexpected errors
 set -o errexit
+set -o xtrace
 
 usage() {
     echo "Usage: $0 - Fetch and prepare Ubuntu images"
diff --git a/tools/install_openvpn.sh b/tools/install_openvpn.sh
index ff88a06..44eee72 100755
--- a/tools/install_openvpn.sh
+++ b/tools/install_openvpn.sh
@@ -11,24 +11,41 @@
 # --client mode creates a tarball of a client configuration for this server
 
 # Get config file
-if [ -e localrc.vpn ]; then
-    . localrc.vpn
+if [ -e localrc ]; then
+    . localrc
 fi
+if [ -e vpnrc ]; then
+    . vpnrc
+fi
+
+# Do some IP manipulation
+function cidr2netmask() {
+    set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0
+    if [[ $1 -gt 1 ]]; then
+        shift $1
+    else
+        shift
+    fi
+    echo ${1-0}.${2-0}.${3-0}.${4-0}
+}
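+# e.g. cidr2netmask 24 -> 255.255.255.0; cidr2netmask 8 -> 255.0.0.0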
+
+FIXED_NET=`echo $FIXED_RANGE | cut -d'/' -f1`
+FIXED_CIDR=`echo $FIXED_RANGE | cut -d'/' -f2`
+FIXED_MASK=`cidr2netmask $FIXED_CIDR`
 
 # VPN Config
 VPN_SERVER=${VPN_SERVER:-`ifconfig eth0 | awk "/inet addr:/ { print \$2 }" | cut -d: -f2`}  # 50.56.12.212
 VPN_PROTO=${VPN_PROTO:-tcp}
 VPN_PORT=${VPN_PORT:-6081}
-VPN_DEV=${VPN_DEV:-tun}
-VPN_BRIDGE=${VPN_BRIDGE:-br0}
-VPN_CLIENT_NET=${VPN_CLIENT_NET:-172.16.28.0}
-VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-255.255.255.0}
-VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-172.16.28.1 172.16.28.254}"
-VPN_LOCAL_NET=${VPN_LOCAL_NET:-10.0.0.0}
-VPN_LOCAL_MASK=${VPN_LOCAL_MASK:-255.255.0.0}
+VPN_DEV=${VPN_DEV:-tap0}
+VPN_BRIDGE=${VPN_BRIDGE:-br100}
+VPN_BRIDGE_IF=${VPN_BRIDGE_IF:-$FLAT_INTERFACE}
+VPN_CLIENT_NET=${VPN_CLIENT_NET:-$FIXED_NET}
+VPN_CLIENT_MASK=${VPN_CLIENT_MASK:-$FIXED_MASK}
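+# Default the DHCP pool to the first/last hosts of the fixed net (assumes
+# FIXED_NET is a dotted quad like 10.0.0.0)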
+VPN_CLIENT_DHCP="${VPN_CLIENT_DHCP:-${FIXED_NET%.*}.1 ${FIXED_NET%.*}.254}"
 
 VPN_DIR=/etc/openvpn
-CA_DIR=/etc/openvpn/easy-rsa
+CA_DIR=$VPN_DIR/easy-rsa
 
 usage() {
     echo "$0 - OpenVPN install and certificate generation"
@@ -54,7 +71,16 @@
     cp -pR /usr/share/doc/openvpn/examples/easy-rsa/2.0/ $CA_DIR
 fi
 
-OPWD=`pwd`
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $TOOLS_DIR/.. && pwd)
+
+WEB_DIR=$TOP_DIR/../vpn
+if [[ ! -d $WEB_DIR ]]; then
+    mkdir -p $WEB_DIR
+fi
+WEB_DIR=$(cd $TOP_DIR/../vpn && pwd)
+
 cd $CA_DIR
 source ./vars
 
@@ -87,6 +113,10 @@
 BR="$VPN_BRIDGE"
 TAP="\$1"
 
+if [[ ! -d /sys/class/net/\$BR ]]; then
+    brctl addbr \$BR
+fi
+
 for t in \$TAP; do
     openvpn --mktun --dev \$t
     brctl addif \$BR \$t
@@ -117,10 +147,8 @@
 ca ca.crt
 dh dh1024.pem
 duplicate-cn
-#server $VPN_CLIENT_NET $VPN_CLIENT_MASK
 server-bridge $VPN_CLIENT_NET $VPN_CLIENT_MASK $VPN_CLIENT_DHCP
 ifconfig-pool-persist ipp.txt
-push "route $VPN_LOCAL_NET $VPN_LOCAL_MASK"
 comp-lzo
 user nobody
 group nogroup
@@ -163,9 +191,9 @@
 comp-lzo
 verb 3
 EOF
-    (cd $TMP_DIR; tar cf $OPWD/$NAME.tar *)
+    (cd $TMP_DIR; tar cf $WEB_DIR/$NAME.tar *)
     rm -rf $TMP_DIR
-    echo "Client certificate and configuration is in $OPWD/$NAME.tar"
+    echo "Client certificate and configuration is in $WEB_DIR/$NAME.tar"
 }
 
 # Process command line args
diff --git a/tools/jenkins/adapters/swift.sh b/tools/jenkins/adapters/swift.sh
new file mode 100755
index 0000000..c1362ee
--- /dev/null
+++ b/tools/jenkins/adapters/swift.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+TOP_DIR=$(cd $(dirname "$0")/../../.. && pwd)
+HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./swift.sh'
diff --git a/tools/jenkins/adapters/volumes.sh b/tools/jenkins/adapters/volumes.sh
new file mode 100755
index 0000000..ec29209
--- /dev/null
+++ b/tools/jenkins/adapters/volumes.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Echo commands, exit on error
+set -o xtrace
+set -o errexit
+
+TOP_DIR=$(cd $(dirname "$0")/../../.. && pwd)
+HEAD_IP=`cat $TOP_DIR/addresses | grep HEAD | cut -d "=" -f2`
+ssh stack@$HEAD_IP 'cd devstack && source openrc && cd exercises &&  ./volumes.sh'