Merge "Remove leftover references to files/pips/"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 4c2f279..5ebdecc 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -35,7 +35,6 @@
 # Import quantum functions if needed
 if is_service_enabled quantum; then
     source $TOP_DIR/lib/quantum
-    setup_quantum
 fi
 
 # Import exercise configuration
@@ -174,10 +173,6 @@
 # Delete a secgroup
 nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
-if is_service_enabled quantum; then
-    teardown_quantum
-fi
-
 set +o xtrace
 echo "*********************************************************************"
 echo "SUCCESS: End DevStack Exercise: $0"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index c307a06..67da1be 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -36,7 +36,6 @@
 # Import quantum functions if needed
 if is_service_enabled quantum; then
     source $TOP_DIR/lib/quantum
-    setup_quantum
 fi
 
 # Import exercise configuration
@@ -175,10 +174,6 @@
 # Delete group
 euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
 
-if is_service_enabled quantum; then
-    teardown_quantum
-fi
-
 set +o xtrace
 echo "*********************************************************************"
 echo "SUCCESS: End DevStack Exercise: $0"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index ae5691f..8b18e6f 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -34,7 +34,6 @@
 # Import quantum functions if needed
 if is_service_enabled quantum; then
     source $TOP_DIR/lib/quantum
-    setup_quantum
 fi
 
 # Import exercise configuration
@@ -202,10 +201,6 @@
 # Delete a secgroup
 nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
-if is_service_enabled quantum; then
-    teardown_quantum
-fi
-
 set +o xtrace
 echo "*********************************************************************"
 echo "SUCCESS: End DevStack Exercise: $0"
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
new file mode 100755
index 0000000..c5dae3a
--- /dev/null
+++ b/exercises/horizon.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# **horizon.sh**
+
+# Sanity check that horizon started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled horizon || exit 55
+
+# can we get the front page
+curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die "Horizon front page not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
+
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index 2ee82ff..493e223 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -58,7 +58,6 @@
 
 # Import quantum fucntions
 source $TOP_DIR/lib/quantum
-setup_quantum
 
 # Import exercise configuration
 source $TOP_DIR/exerciserc
@@ -475,7 +474,6 @@
 }
 
 
-teardown_quantum
 #-------------------------------------------------------------------------------
 # Kick off script.
 #-------------------------------------------------------------------------------
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 3432763..42f9cb4 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -33,7 +33,6 @@
 # Import quantum functions if needed
 if is_service_enabled quantum; then
     source $TOP_DIR/lib/quantum
-    setup_quantum
 fi
 
 # Import exercise configuration
@@ -212,10 +211,6 @@
 # Delete a secgroup
 nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
-if is_service_enabled quantum; then
-    teardown_quantum
-fi
-
 set +o xtrace
 echo "*********************************************************************"
 echo "SUCCESS: End DevStack Exercise: $0"
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index f75d24a..c8e68dd 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -4,7 +4,6 @@
 #
 # Tenant               User       Roles
 # ------------------------------------------------------------------
-# admin                admin      admin
 # service              glance     admin
 # service              nova       admin, [ResellerAdmin (swift only)]
 # service              quantum    admin        # if enabled
@@ -12,9 +11,6 @@
 # service              cinder     admin        # if enabled
 # service              heat       admin        # if enabled
 # service              ceilometer admin        # if enabled
-# demo                 admin      admin
-# demo                 demo       Member, anotherrole
-# invisible_to_admin   demo       Member
 # Tempest Only:
 # alt_demo             alt_demo  Member
 #
@@ -40,71 +36,23 @@
     echo `"$@" | awk '/ id / { print $4 }'`
 }
 
-
-# Tenants
-# -------
-
-ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
-SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
-DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
-INVIS_TENANT=$(get_id keystone tenant-create --name=invisible_to_admin)
-
-
-# Users
-# -----
-
-ADMIN_USER=$(get_id keystone user-create --name=admin \
-                                         --pass="$ADMIN_PASSWORD" \
-                                         --email=admin@example.com)
-DEMO_USER=$(get_id keystone user-create --name=demo \
-                                        --pass="$ADMIN_PASSWORD" \
-                                        --email=demo@example.com)
+# Lookups
+SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
 
 
 # Roles
 # -----
 
-ADMIN_ROLE=$(get_id keystone role-create --name=admin)
-KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
-KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
-# ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
-# TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-ANOTHER_ROLE=$(get_id keystone role-create --name=anotherrole)
-
-
-# Add Roles to Users in Tenants
-keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $ADMIN_TENANT
-keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT
-keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT
-
-# TODO(termie): these two might be dubious
-keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONEADMIN_ROLE --tenant_id $ADMIN_TENANT
-keystone user-role-add --user_id $ADMIN_USER --role_id $KEYSTONESERVICE_ROLE --tenant_id $ADMIN_TENANT
-
-
-# The Member role is used by Horizon and Swift so we need to keep it:
-MEMBER_ROLE=$(get_id keystone role-create --name=Member)
-keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT
-keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT
-
+# The ResellerAdmin role is used by Nova and Ceilometer so we need to keep it.
+# The admin role in swift allows a user to act as an admin for their tenant,
+# but ResellerAdmin is needed for a user to act as any tenant. The name of this
+# role is also configurable in swift-proxy.conf
+RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
 
 # Services
 # --------
 
-# Keystone
-if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-	KEYSTONE_SERVICE=$(get_id keystone service-create \
-		--name=keystone \
-		--type=identity \
-		--description="Keystone Identity Service")
-	keystone endpoint-create \
-	    --region RegionOne \
-		--service_id $KEYSTONE_SERVICE \
-		--publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \
-		--adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \
-		--internalurl "http://$SERVICE_HOST:\$(public_port)s/v2.0"
-fi
-
 # Nova
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
     NOVA_USER=$(get_id keystone user-create \
@@ -129,11 +77,7 @@
             --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v2/\$(tenant_id)s"
     fi
     # Nova needs ResellerAdmin role to download images when accessing
-    # swift through the s3 api. The admin role in swift allows a user
-    # to act as an admin for their tenant, but ResellerAdmin is needed
-    # for a user to act as any tenant. The name of this role is also
-    # configurable in swift-proxy.conf
-    RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
+    # swift through the s3 api.
     keystone user-role-add \
         --tenant_id $SERVICE_TENANT \
         --user_id $NOVA_USER \
@@ -255,6 +199,10 @@
     keystone user-role-add --tenant_id $SERVICE_TENANT \
                            --user_id $CEILOMETER_USER \
                            --role_id $ADMIN_ROLE
+    # Ceilometer needs ResellerAdmin role to access swift account stats.
+    keystone user-role-add --tenant_id $SERVICE_TENANT \
+                           --user_id $CEILOMETER_USER \
+                           --role_id $RESELLER_ROLE
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         CEILOMETER_SERVICE=$(get_id keystone service-create \
             --name=ceilometer \
diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector
new file mode 100644
index 0000000..c76454f
--- /dev/null
+++ b/files/rpms-suse/ceilometer-collector
@@ -0,0 +1,4 @@
+# Not available in openSUSE main repositories, but can be fetched from OBS
+# (devel:languages:python and server:database projects)
+mongodb
+python-pymongo
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
new file mode 100644
index 0000000..e5b4727
--- /dev/null
+++ b/files/rpms-suse/cinder
@@ -0,0 +1,2 @@
+lvm2
+tgt
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
new file mode 100644
index 0000000..8ed74ec
--- /dev/null
+++ b/files/rpms-suse/general
@@ -0,0 +1,23 @@
+bridge-utils
+curl
+euca2ools
+git-core
+iputils
+openssh
+psmisc
+python-cmd2 # dist:opensuse-12.3
+python-netaddr
+python-pep8
+python-pip
+python-pylint
+python-unittest2
+python-virtualenv
+screen
+tar
+tcpdump
+unzip
+vim-enhanced
+wget
+
+findutils-locate # useful when debugging
+lsof # useful when debugging
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
new file mode 100644
index 0000000..dd68ac0
--- /dev/null
+++ b/files/rpms-suse/glance
@@ -0,0 +1,12 @@
+gcc
+libxml2-devel
+python-PasteDeploy
+python-Routes
+python-SQLAlchemy
+python-argparse
+python-devel
+python-eventlet
+python-greenlet
+python-iso8601
+python-wsgiref
+python-xattr
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
new file mode 100644
index 0000000..7e46ffe
--- /dev/null
+++ b/files/rpms-suse/horizon
@@ -0,0 +1,23 @@
+apache2  # NOPRIME
+apache2-mod_wsgi  # NOPRIME
+nodejs
+python-CherryPy # why? (coming from apts)
+python-Paste
+python-PasteDeploy
+python-Routes
+python-Sphinx
+python-SQLAlchemy
+python-WebOb
+python-anyjson
+python-beautifulsoup
+python-coverage
+python-dateutil
+python-eventlet
+python-kombu
+python-mox
+python-netaddr
+python-nose
+python-pep8
+python-pylint
+python-sqlalchemy-migrate
+python-xattr
diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone
new file mode 100644
index 0000000..b3c876a
--- /dev/null
+++ b/files/rpms-suse/keystone
@@ -0,0 +1,17 @@
+cyrus-sasl-devel
+openldap2-devel
+python-Paste
+python-PasteDeploy
+python-PasteScript
+python-Routes
+python-SQLAlchemy
+python-WebOb
+python-devel
+python-distribute
+python-setuptools # instead of python-distribute; dist:sle11sp2
+python-greenlet
+python-lxml
+python-mysql
+python-py-bcrypt
+python-pysqlite
+sqlite3
diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api
new file mode 100644
index 0000000..ad943ff
--- /dev/null
+++ b/files/rpms-suse/n-api
@@ -0,0 +1,2 @@
+gcc  # temporary because this pulls in glance to get the client without running the glance prereqs
+python-dateutil
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
new file mode 100644
index 0000000..27d3254
--- /dev/null
+++ b/files/rpms-suse/n-cpu
@@ -0,0 +1,4 @@
+# Stuff for diablo volumes
+genisoimage
+lvm2
+open-iscsi
diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc
new file mode 100644
index 0000000..c8722b9
--- /dev/null
+++ b/files/rpms-suse/n-novnc
@@ -0,0 +1 @@
+python-numpy
diff --git a/files/rpms-suse/n-vol b/files/rpms-suse/n-vol
new file mode 100644
index 0000000..e5b4727
--- /dev/null
+++ b/files/rpms-suse/n-vol
@@ -0,0 +1,2 @@
+lvm2
+tgt
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
new file mode 100644
index 0000000..0306716
--- /dev/null
+++ b/files/rpms-suse/nova
@@ -0,0 +1,50 @@
+curl
+# Note: we need to package dhcp_release in dnsmasq!
+dnsmasq
+ebtables
+gawk
+iptables
+iputils
+kpartx
+kvm
+# qemu as fallback if kvm cannot be used
+qemu
+libvirt # NOPRIME
+libvirt-python
+libxml2-python
+mysql-community-server # NOPRIME
+parted
+python-M2Crypto
+python-m2crypto # dist:sle11sp2
+python-Paste
+python-PasteDeploy
+python-Routes
+python-SQLAlchemy
+python-Tempita
+python-boto
+python-carrot
+python-cheetah
+python-eventlet
+python-feedparser
+python-greenlet
+python-iso8601
+python-kombu
+python-lockfile
+python-lxml # needed for glance which is needed for nova --- this shouldn't be here
+python-mox
+python-mysql
+python-netaddr
+python-paramiko
+python-python-gflags
+python-sqlalchemy-migrate
+python-suds
+python-xattr # needed for glance which is needed for nova --- this shouldn't be here
+rabbitmq-server # NOPRIME
+socat
+sqlite3
+sudo
+vlan
+
+# FIXME: qpid is not part of openSUSE, those names are tentative
+python-qpid # NOPRIME
+qpidd # NOPRIME
diff --git a/files/rpms-suse/postgresql b/files/rpms-suse/postgresql
new file mode 100644
index 0000000..bf19d39
--- /dev/null
+++ b/files/rpms-suse/postgresql
@@ -0,0 +1 @@
+python-psycopg2
diff --git a/files/rpms-suse/quantum b/files/rpms-suse/quantum
new file mode 100644
index 0000000..068c15c
--- /dev/null
+++ b/files/rpms-suse/quantum
@@ -0,0 +1,27 @@
+# Note: we need to package dhcp_release in dnsmasq!
+dnsmasq
+ebtables
+iptables
+iputils
+mysql-community-server # NOPRIME
+python-boto
+python-eventlet
+python-greenlet
+python-iso8601
+python-kombu
+python-mysql
+python-netaddr
+python-Paste
+python-PasteDeploy
+python-pyudev
+python-Routes
+python-SQLAlchemy
+python-suds
+rabbitmq-server # NOPRIME
+sqlite3
+sudo
+vlan
+
+# FIXME: qpid is not part of openSUSE, those names are tentative
+python-qpid # NOPRIME
+qpidd # NOPRIME
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
new file mode 100644
index 0000000..763fd24
--- /dev/null
+++ b/files/rpms-suse/ryu
@@ -0,0 +1,5 @@
+python-distribute
+python-setuptools # instead of python-distribute; dist:sle11sp2
+python-Sphinx
+python-gevent
+python-python-gflags
diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift
new file mode 100644
index 0000000..db379bb
--- /dev/null
+++ b/files/rpms-suse/swift
@@ -0,0 +1,19 @@
+curl
+gcc
+memcached
+python-PasteDeploy
+python-WebOb
+python-configobj
+python-coverage
+python-devel
+python-distribute
+python-setuptools # instead of python-distribute; dist:sle11sp2
+python-eventlet
+python-greenlet
+python-netifaces
+python-nose
+python-simplejson
+python-xattr
+sqlite3
+xfsprogs
+xinetd
diff --git a/functions b/functions
index f2b12e2..0911557 100644
--- a/functions
+++ b/functions
@@ -341,6 +341,19 @@
 }
 
 
+# Determine if current distribution is an Ubuntu-based distribution.
+# It will also detect non-Ubuntu but Debian-based distros; this is not an issue
+# since Debian and Ubuntu should be compatible.
+# is_ubuntu
+function is_ubuntu {
+    if [[ -z "$os_PACKAGE" ]]; then
+        GetOSVersion
+    fi
+
+    [ "$os_PACKAGE" = "deb" ]
+}
+
+
 # Determine if current distribution is a SUSE-based distribution
 # (openSUSE, SLE).
 # is_suse
@@ -349,8 +362,7 @@
         GetOSVersion
     fi
 
-    [[ "$os_VENDOR" = "openSUSE" || "$os_VENDOR" = "SUSE LINUX" ]]
-    return $?
+    [ "$os_VENDOR" = "openSUSE" ] || [ "$os_VENDOR" = "SUSE LINUX" ]
 }
 
 
@@ -581,11 +593,7 @@
 # Distro-agnostic package installer
 # install_package package [package ...]
 function install_package() {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         [[ "$NO_UPDATE_REPOS" = "True" ]] || apt_get update
         NO_UPDATE_REPOS=True
 
@@ -610,6 +618,7 @@
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
     fi
+
     if [[ "$os_PACKAGE" = "deb" ]]; then
         dpkg -l "$@" > /dev/null
         return $?
@@ -646,11 +655,7 @@
         SUDO_PIP="env"
     else
         SUDO_PIP="sudo"
-        if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then
-            CMD_PIP=/usr/bin/pip
-        else
-            CMD_PIP=/usr/bin/pip-python
-        fi
+        CMD_PIP=$(get_pip_command)
     fi
     if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
         PIP_MIRROR_OPT="--use-mirrors"
@@ -666,10 +671,7 @@
 # Service wrapper to restart services
 # restart_service service-name
 function restart_service() {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         sudo /usr/sbin/service $1 restart
     else
         sudo /sbin/service $1 restart
@@ -751,10 +753,7 @@
 # Service wrapper to start services
 # start_service service-name
 function start_service() {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         sudo /usr/sbin/service $1 start
     else
         sudo /sbin/service $1 start
@@ -765,10 +764,7 @@
 # Service wrapper to stop services
 # stop_service service-name
 function stop_service() {
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         sudo /usr/sbin/service $1 stop
     else
         sudo /sbin/service $1 stop
@@ -1036,17 +1032,22 @@
 function get_rootwrap_location() {
     local module=$1
 
-    if [[ -z "$os_PACKAGE" ]]; then
-        GetOSVersion
-    fi
-
-    if [[ "$os_PACKAGE" = "deb" || is_suse ]]; then
+    if is_ubuntu || is_suse; then
         echo "/usr/local/bin/$module-rootwrap"
     else
         echo "/usr/bin/$module-rootwrap"
     fi
 }
 
+# Get the path to the pip command.
+# get_pip_command
+function get_pip_command() {
+    if is_ubuntu || is_suse; then
+        echo "/usr/bin/pip"
+    else
+        echo "/usr/bin/pip-python"
+    fi
+}
 
 # Check if qpid can be used on the current distro.
 # qpid_is_supported
@@ -1057,7 +1058,7 @@
 
     # Qpid was introduced to Ubuntu in precise, disallow it on oneiric; it is
     # not in openSUSE either right now.
-    [[ "$DISTRO" = "oneiric" || is_suse ]]
+    ( ! ([[ "$DISTRO" = "oneiric" ]] || is_suse) )
     return $?
 }
 
diff --git a/lib/cinder b/lib/cinder
index 058fcc2..ce160bf 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -24,6 +24,9 @@
 # Defaults
 # --------
 
+# set up default driver
+CINDER_DRIVER=${CINDER_DRIVER:-default}
+
 # set up default directories
 CINDER_DIR=$DEST/cinder
 CINDERCLIENT_DIR=$DEST/python-cinderclient
@@ -145,6 +148,19 @@
         iniset $CINDER_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
         iniset $CINDER_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s TRACE %(name)s %(instance)s"
     fi
+
+    if [ "$CINDER_DRIVER" == "XenAPINFS" ]; then
+        (
+            set -u
+            iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.xenapi_sm.XenAPINFSDriver"
+            iniset $CINDER_CONF DEFAULT xenapi_connection_url "$CINDER_XENAPI_CONNECTION_URL"
+            iniset $CINDER_CONF DEFAULT xenapi_connection_username "$CINDER_XENAPI_CONNECTION_USERNAME"
+            iniset $CINDER_CONF DEFAULT xenapi_connection_password "$CINDER_XENAPI_CONNECTION_PASSWORD"
+            iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
+            iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
+        )
+        [ $? -ne 0 ] && exit 1
+    fi
 }
 
 # init_cinder() - Initialize database and volume group
@@ -221,7 +237,7 @@
 # start_cinder() - Start running processes, including screen
 function start_cinder() {
     if is_service_enabled c-vol; then
-        if [[ "$os_PACKAGE" = "deb" ]]; then
+        if is_ubuntu; then
             _configure_tgt_for_config_d
             if [[ ! -f /etc/tgt/conf.d/cinder.conf ]]; then
                echo "include $CINDER_STATE_PATH/volumes/*" | sudo tee /etc/tgt/conf.d/cinder.conf
diff --git a/lib/databases/mysql b/lib/databases/mysql
index fc6a3b7..60ea143 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -20,7 +20,7 @@
 function configure_database_mysql {
     echo_summary "Configuring and starting MySQL"
 
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         MY_CONF=/etc/mysql/my.cnf
         MYSQL=mysql
     else
@@ -61,7 +61,7 @@
 }
 
 function install_database_mysql {
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         # Seed configuration with mysql password so that apt-get install doesn't
         # prompt us for a password upon install.
         cat <<MYSQL_PRESEED | sudo debconf-set-selections
@@ -84,7 +84,11 @@
         chmod 0600 $HOME/.my.cnf
     fi
     # Install mysql-server
-    install_package mysql-server
+    if is_suse; then
+        install_package mysql-community-server
+    else
+        install_package mysql-server
+    fi
 }
 
 function database_connection_url_mysql {
diff --git a/lib/glance b/lib/glance
index 60026d5..b02a4b6 100644
--- a/lib/glance
+++ b/lib/glance
@@ -70,13 +70,6 @@
     setup_develop $GLANCECLIENT_DIR
 }
 
-# durable_glance_queues() - Determine if RabbitMQ queues are durable or not
-function durable_glance_queues() {
-    test `rabbitmqctl list_queues name durable | grep true | wc -l` -gt 0 && return 0
-    test `rabbitmqctl list_exchanges name durable | grep true | wc -l` -gt 0 && return 0
-    return 1
-}
-
 # configure_glance() - Set config files, create data dirs, etc
 function configure_glance() {
     setup_develop $GLANCE_DIR
@@ -127,12 +120,6 @@
         iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit
         iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST
         iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-        if [[ durable_glance_queues -eq 0 ]]; then
-            # This gets around https://bugs.launchpad.net/glance/+bug/1074132
-            # that results in a g-api server becoming unresponsive during
-            # startup...
-            iniset $GLANCE_API_CONF DEFAULT rabbit_durable_queues True
-        fi
     fi
     if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
         iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
diff --git a/lib/heat b/lib/heat
index 396c8a0..b640fbc 100644
--- a/lib/heat
+++ b/lib/heat
@@ -1,7 +1,7 @@
 # lib/heat
 # Install and start Heat service
 # To enable, add the following to localrc
-# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng,h-meta
+# ENABLED_SERVICES+=,heat,h-api-cfn,h-api-cw,h-eng
 
 # Dependencies:
 # - functions
@@ -52,8 +52,6 @@
     HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000}
     HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST}
     HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001}
-    HEAT_METADATA_HOST=${HEAT_METADATA_HOST:-$SERVICE_HOST}
-    HEAT_METADATA_PORT=${HEAT_METADATA_PORT:-8002}
     HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$SERVICE_HOST}
     HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003}
     HEAT_API_HOST=${HEAT_API_HOST:-$SERVICE_HOST}
@@ -126,7 +124,7 @@
     iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST
     iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT
     iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
-    iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_METADATA_HOST:$HEAT_METADATA_PORT
+    iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_CFN_HOST:$HEAT_CFN_PORT/v1/waitcondition
     iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
     local dburl
     database_connection_url dburl heat
@@ -141,26 +139,6 @@
         iniset $HEAT_ENGINE_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
     fi
 
-    # metadata api
-    HEAT_METADATA_CONF=$HEAT_CONF_DIR/heat-metadata.conf
-    cp $HEAT_DIR/etc/heat/heat-metadata.conf $HEAT_METADATA_CONF
-    iniset $HEAT_METADATA_CONF DEFAULT debug True
-    inicomment $HEAT_METADATA_CONF DEFAULT log_file
-    iniset $HEAT_METADATA_CONF DEFAULT use_syslog $SYSLOG
-    iniset $HEAT_METADATA_CONF DEFAULT bind_host $HEAT_METADATA_HOST
-    iniset $HEAT_METADATA_CONF DEFAULT bind_port $HEAT_METADATA_PORT
-
-    if is_service_enabled rabbit; then
-        iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_kombu
-        iniset $HEAT_METADATA_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-        iniset $HEAT_METADATA_CONF DEFAULT rabbit_host $RABBIT_HOST
-    elif is_service_enabled qpid; then
-        iniset $HEAT_METADATA_CONF DEFAULT rpc_backend heat.openstack.common.rpc.impl_qpid
-    fi
-
-    HEAT_METADATA_PASTE_INI=$HEAT_CONF_DIR/heat-metadata-paste.ini
-    cp $HEAT_DIR/etc/heat/heat-metadata-paste.ini $HEAT_METADATA_PASTE_INI
-
     # cloudwatch api
     HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf
     cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF
@@ -217,13 +195,12 @@
     screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf"
     screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf"
     screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf"
-    screen_it h-meta "cd $HEAT_DIR; bin/heat-metadata --config-dir=$HEAT_CONF_DIR/heat-metadata.conf"
 }
 
 # stop_heat() - Stop running processes
 function stop_heat() {
     # Kill the cinder screen windows
-    for serv in h-eng h-api-cfn h-api-cw h-meta; do
+    for serv in h-eng h-api-cfn h-api-cw; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
 }
diff --git a/lib/horizon b/lib/horizon
index af09f77..7321cbc 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -29,10 +29,10 @@
 # Set up default directories
 HORIZON_DIR=$DEST/horizon
 
-# Allow overriding the default Apache user and group, default both to
-# current user.
+# Allow overriding the default Apache user and group, default to
+# current user and his default group.
 APACHE_USER=${APACHE_USER:-$USER}
-APACHE_GROUP=${APACHE_GROUP:-$APACHE_USER}
+APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
 
 
 # Entry Points
@@ -71,7 +71,7 @@
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
 
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         APACHE_NAME=apache2
         APACHE_CONF=sites-available/horizon
         # Clean up the old config name
@@ -79,6 +79,8 @@
         # Be a good citizen and use the distro tools here
         sudo touch /etc/$APACHE_NAME/$APACHE_CONF
         sudo a2ensite horizon
+        # WSGI doesn't enable by default, enable it
+        sudo a2enmod wsgi
     else
         # Install httpd, which is NOPRIME'd
         if is_suse; then
@@ -108,16 +110,18 @@
 # install_horizon() - Collect source and prepare
 function install_horizon() {
     # Apache installation, because we mark it NOPRIME
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
         install_package apache2 libapache2-mod-wsgi
+    elif is_suse; then
+        install_package apache2 apache2-mod_wsgi
     else
         sudo rm -f /etc/httpd/conf.d/000-*
         install_package httpd mod_wsgi
     fi
 
     # NOTE(sdague) quantal changed the name of the node binary
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         if [[ ! -e "/usr/bin/node" ]]; then
             install_package nodejs-legacy
         fi
diff --git a/lib/keystone b/lib/keystone
index ae89056..f6a6d66 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -15,6 +15,7 @@
 # configure_keystone
 # init_keystone
 # start_keystone
+# create_keystone_accounts
 # stop_keystone
 # cleanup_keystone
 
@@ -45,7 +46,6 @@
 KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-PKI}
 
 # Set Keystone interface configuration
-KEYSTONE_API_PORT=${KEYSTONE_API_PORT:-5000}
 KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
 KEYSTONE_AUTH_PORT=${KEYSTONE_AUTH_PORT:-35357}
 KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-http}
@@ -144,6 +144,100 @@
 
 }
 
+# create_keystone_accounts() - Sets up common required keystone accounts
+
+# Tenant               User       Roles
+# ------------------------------------------------------------------
+# service              --         --
+# --                   --         Member
+# admin                admin      admin
+# demo                 admin      admin
+# demo                 demo       Member, anotherrole
+# invisible_to_admin   demo       Member
+
+# Migrated from keystone_data.sh
+create_keystone_accounts() {
+
+    # admin
+    ADMIN_TENANT=$(keystone tenant-create \
+        --name admin \
+        | grep " id " | get_field 2)
+    ADMIN_USER=$(keystone user-create \
+        --name admin \
+        --pass "$ADMIN_PASSWORD" \
+        --email admin@example.com \
+        | grep " id " | get_field 2)
+    ADMIN_ROLE=$(keystone role-create \
+        --name admin \
+        | grep " id " | get_field 2)
+    keystone user-role-add \
+        --user_id $ADMIN_USER \
+        --role_id $ADMIN_ROLE \
+        --tenant_id $ADMIN_TENANT
+
+    # service
+    SERVICE_TENANT=$(keystone tenant-create \
+        --name $SERVICE_TENANT_NAME \
+        | grep " id " | get_field 2)
+
+    # The Member role is used by Horizon and Swift so we need to keep it:
+    MEMBER_ROLE=$(keystone role-create --name=Member | grep " id " | get_field 2)
+    # ANOTHER_ROLE demonstrates that an arbitrary role may be created and used
+    # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
+    ANOTHER_ROLE=$(keystone role-create --name=anotherrole | grep " id " | get_field 2)
+
+    # invisible tenant - admin can't see this one
+    INVIS_TENANT=$(keystone tenant-create --name=invisible_to_admin | grep " id " | get_field 2)
+
+    # demo
+    DEMO_TENANT=$(keystone tenant-create \
+        --name=demo \
+        | grep " id " | get_field 2)
+    DEMO_USER=$(keystone user-create \
+        --name demo \
+        --pass "$ADMIN_PASSWORD" \
+        --email demo@example.com \
+        | grep " id " | get_field 2)
+    keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT
+    keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT
+    keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT
+    keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT
+
+    # Keystone
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        KEYSTONE_SERVICE=$(keystone service-create \
+            --name keystone \
+            --type identity \
+            --description "Keystone Identity Service" \
+            | grep " id " | get_field 2)
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $KEYSTONE_SERVICE \
+            --publicurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0" \
+            --adminurl "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:\$(admin_port)s/v2.0" \
+            --internalurl "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:\$(public_port)s/v2.0"
+    fi
+
+    # TODO(dtroyer): This is part of a series of changes...remove these when
+    #                complete if they are really unused
+#    KEYSTONEADMIN_ROLE=$(keystone role-create \
+#        --name KeystoneAdmin \
+#        | grep " id " | get_field 2)
+#    KEYSTONESERVICE_ROLE=$(keystone role-create \
+#        --name KeystoneServiceAdmin \
+#        | grep " id " | get_field 2)
+
+    # TODO(termie): these two might be dubious
+#    keystone user-role-add \
+#        --user_id $ADMIN_USER \
+#        --role_id $KEYSTONEADMIN_ROLE \
+#        --tenant_id $ADMIN_TENANT
+#    keystone user-role-add \
+#        --user_id $ADMIN_USER \
+#        --role_id $KEYSTONESERVICE_ROLE \
+#        --tenant_id $ADMIN_TENANT
+}
+
 # init_keystone() - Initialize databases, etc.
 function init_keystone() {
     # (Re)create keystone database
@@ -176,6 +270,11 @@
 function start_keystone() {
     # Start Keystone in a screen window
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
+    echo "Waiting for keystone to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
+      echo "keystone did not start"
+      exit 1
+    fi
 }
 
 # stop_keystone() - Stop running processes
diff --git a/lib/nova b/lib/nova
index d15d9e3..3a4d34d 100644
--- a/lib/nova
+++ b/lib/nova
@@ -202,7 +202,7 @@
         # splitting a system into many smaller parts.  LXC uses cgroups and chroot
         # to simulate multiple systems.
         if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
-            if [[ "$os_PACKAGE" = "deb" ]]; then
+            if is_ubuntu; then
                 if [[ ! "$DISTRO" > natty ]]; then
                     cgline="none /cgroup cgroup cpuacct,memory,devices,cpu,freezer,blkio 0 0"
                     sudo mkdir -p /cgroup
@@ -228,11 +228,11 @@
 EOF
         fi
 
-        if [[ "$os_PACKAGE" = "deb" ]]; then
+        if is_ubuntu; then
             LIBVIRT_DAEMON=libvirt-bin
         else
             # http://wiki.libvirt.org/page/SSHPolicyKitSetup
-            if ! grep ^libvirtd: /etc/group >/dev/null; then
+            if ! getent group libvirtd >/dev/null; then
                 sudo groupadd libvirtd
             fi
             sudo bash -c 'cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
@@ -393,7 +393,7 @@
 # install_nova() - Collect source and prepare
 function install_nova() {
     if is_service_enabled n-cpu; then
-        if [[ "$os_PACKAGE" = "deb" ]]; then
+        if is_ubuntu; then
             LIBVIRT_PKG_NAME=libvirt-bin
         else
             LIBVIRT_PKG_NAME=libvirt
@@ -403,7 +403,7 @@
         # splitting a system into many smaller parts.  LXC uses cgroups and chroot
         # to simulate multiple systems.
         if [[ "$LIBVIRT_TYPE" == "lxc" ]]; then
-            if [[ "$os_PACKAGE" = "deb" ]]; then
+            if is_ubuntu; then
                 if [[ "$DISTRO" > natty ]]; then
                     install_package cgroup-lite
                 fi
@@ -423,6 +423,7 @@
     # The group **libvirtd** is added to the current user in this script.
     # Use 'sg' to execute nova-compute as a member of the **libvirtd** group.
     # ``screen_it`` checks ``is_service_enabled``, it is not needed here
+    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
     screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
     screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
     screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
@@ -430,7 +431,6 @@
     screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF --web ."
     screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
     screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"
-    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
 }
 
 # stop_nova() - Stop running processes (non-screen)
diff --git a/lib/quantum b/lib/quantum
index ba98b64..14a3a4a 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -5,7 +5,19 @@
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+QUANTUM_DIR=$DEST/quantum
 export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"}
+QUANTUM_AUTH_CACHE_DIR=${QUANTUM_AUTH_CACHE_DIR:-/var/cache/quantum}
+
+if is_service_enabled quantum; then
+    Q_CONF_FILE=/etc/quantum/quantum.conf
+    Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        Q_RR_COMMAND="sudo"
+    else
+        Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE"
+    fi
+fi
 
 # Configures keystone integration for quantum service and agents
 function quantum_setup_keystone() {
@@ -22,6 +34,12 @@
     iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
     iniset $conf_file $section admin_user $Q_ADMIN_USERNAME
     iniset $conf_file $section admin_password $SERVICE_PASSWORD
+    if [[ "$KEYSTONE_TOKEN_FORMAT" == "PKI" ]]; then
+        iniset $conf_file $section signing_dir $QUANTUM_AUTH_CACHE_DIR
+        # Create cache dir
+        sudo mkdir -p $QUANTUM_AUTH_CACHE_DIR
+        sudo chown `whoami` $QUANTUM_AUTH_CACHE_DIR
+    fi
 }
 
 function quantum_setup_ovs_bridge() {
@@ -67,7 +85,7 @@
     local from_net="$1"
     net_id=`_get_net_id $from_net`
     probe_id=`quantum-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
-    echo "sudo ip netns exec qprobe-$probe_id"
+    echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
 }
 
 function delete_probe() {
@@ -85,9 +103,9 @@
     local check_command=""
     probe_cmd=`_get_probe_cmd_prefix $from_net`
     if [[ "$expected" = "True" ]]; then
-        check_command="while ! $probe_cmd ping -c1 -w1 $ip; do sleep 1; done"
+        check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done"
     else
-        check_command="while $probe_cmd ping -c1 -w1 $ip; do sleep 1; done"
+        check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done"
     fi
     if ! timeout $timeout_sec sh -c "$check_command"; then
         if [[ "$expected" = "True" ]]; then
diff --git a/lib/swift b/lib/swift
new file mode 100644
index 0000000..140e5e9
--- /dev/null
+++ b/lib/swift
@@ -0,0 +1,366 @@
+# lib/swift
+# Functions to control the configuration and operation of the swift service
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``SCREEN_NAME``, ``SWIFT_HASH`` must be defined
+# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
+# ``lib/keystone`` file
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_swift
+# configure_swift
+# init_swift
+# start_swift
+# stop_swift
+# cleanup_swift
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+
+SWIFT_DIR=$DEST/swift
+SWIFTCLIENT_DIR=$DEST/python-swiftclient
+
+# TODO: add logging to different location.
+
+# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
+# Default is the common DevStack data directory.
+SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
+
+# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
+# Default is ``/etc/swift``.
+SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
+
+# DevStack will create a loop-back disk formatted as XFS to store the
+# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes.
+# Default is 1 gigabyte.
+SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
+
+# The ring uses a configurable number of bits from a path’s MD5 hash as
+# a partition index that designates a device. The number of bits kept
+# from the hash is known as the partition power, and 2 to the partition
+# power indicates the partition count. Partitioning the full MD5 hash
+# ring allows other parts of the cluster to work in batches of items at
+# once which ends up either more efficient or at least less complex than
+# working with each item separately or the entire cluster all at once.
+# By default we use 9 as the partition power (2^9 = 512 partitions).
+SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
+
+# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
+# configured for your Swift cluster. The default of three replicas needs a
+# fair amount of IO and memory on a VM, so you may want to lower it to 1
+# if you only want to do some quick testing.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
+SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
+
+# Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
+# Port bases used in port number calculation for the service "nodes"
+# The specified port number will be used, the additional ports calculated by
+# base_port + node_num * 10
+OBJECT_PORT_BASE=6010
+CONTAINER_PORT_BASE=6011
+ACCOUNT_PORT_BASE=6012
+
+# Entry Points
+# ------------
+
+# cleanup_swift() - Remove residual data files: ring builder files and
+# backups under ``SWIFT_CONFIG_DIR``, and the loopback disk image
+function cleanup_swift() {
+   rm -f ${SWIFT_CONFIG_DIR}/{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
+   if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+      sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+   fi
+   if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+      rm ${SWIFT_DATA_DIR}/drives/images/swift.img
+   fi
+}
+
+# configure_swift() - Set config files, create data dirs and loop image
+function configure_swift() {
+    local swift_auth_server
+    local node_number
+    local swift_node_config
+    local swift_log_dir
+
+    setup_develop $SWIFT_DIR
+
+    # Make sure to kill all swift processes first
+    swift-init all stop || true
+
+    # First do a bit of setup by creating the directories and
+    # changing the permissions so we can run it as our user.
+
+    USER_GROUP=$(id -g)
+    sudo mkdir -p ${SWIFT_DATA_DIR}/drives
+    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
+
+    # Create a loopback disk and format it to XFS.
+    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
+        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
+        fi
+    else
+        mkdir -p  ${SWIFT_DATA_DIR}/drives/images
+        sudo touch  ${SWIFT_DATA_DIR}/drives/images/swift.img
+        sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+        dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
+            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
+    fi
+
+    # Make a fresh XFS filesystem
+    mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
+
+    # Mount the disk with mount options to make it as efficient as possible
+    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
+    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
+        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
+    fi
+
+    # Create a link to the above mount and
+    # create all of the directories needed to emulate a few different servers
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$node_number ${SWIFT_DATA_DIR}/$node_number;
+        drive=${SWIFT_DATA_DIR}/drives/sdb1/${node_number}
+        node=${SWIFT_DATA_DIR}/${node_number}/node
+        node_device=${node}/sdb1
+        [[ -d $node ]] && continue
+        [[ -d $drive ]] && continue
+        sudo install -o ${USER} -g $USER_GROUP -d $drive
+        sudo install -o ${USER} -g $USER_GROUP -d $node_device
+        sudo chown -R $USER: ${node}
+    done
+
+   sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift
+   sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift
+
+    if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
+        # Create a symlink if the config dir is moved
+        sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+    fi
+
+    # Swift use rsync to synchronize between all the different
+    # partitions (which make more sense when you have a multi-node
+    # setup) we configure it with our version of rsync.
+    sed -e "
+        s/%GROUP%/${USER_GROUP}/;
+        s/%USER%/$USER/;
+        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
+    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
+    # Note: the rsyncd.conf template is only prepared for 4 nodes
+    if is_ubuntu; then
+        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
+    else
+        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
+    fi
+
+    if is_service_enabled swift3;then
+        swift_auth_server="s3token "
+    fi
+
+    # By default Swift is installed with the tempauth middleware, which
+    # has built-in default usernames and passwords; if keystone is
+    # enabled, the keystone auth middleware is used instead.
+    if is_service_enabled key; then
+        swift_auth_server+="authtoken keystoneauth"
+    else
+        swift_auth_server=tempauth
+    fi
+
+    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
+    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+
+    # Only enable Swift3 if we have it enabled in ENABLED_SERVICES
+    is_service_enabled swift3 && swift3=swift3 || swift3=""
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server"
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
+
+    # Configure Keystone
+    sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
+
+    if is_service_enabled swift3; then
+        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
+# NOTE(chmou): s3token middleware is not updated yet to use only
+# username and password.
+[filter:s3token]
+paste.filter_factory = keystone.middleware.s3_token:filter_factory
+auth_port = ${KEYSTONE_AUTH_PORT}
+auth_host = ${KEYSTONE_AUTH_HOST}
+auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+auth_token = ${SERVICE_TOKEN}
+admin_token = ${SERVICE_TOKEN}
+
+[filter:swift3]
+use = egg:swift3#swift3
+EOF
+    fi
+
+    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
+    iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
+
+    # This function generates an object/container/account server
+    # configuration, emulating ``SWIFT_REPLICAS`` nodes on different ports
+    function generate_swift_config() {
+        local swift_node_config=$1
+        local node_id=$2
+        local bind_port=$3
+
+        log_facility=$[ node_id - 1 ]
+        node_path=${SWIFT_DATA_DIR}/${node_number}
+
+        iniuncomment ${swift_node_config} DEFAULT user
+        iniset ${swift_node_config} DEFAULT user ${USER}
+
+        iniuncomment ${swift_node_config} DEFAULT bind_port
+        iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
+
+        iniuncomment ${swift_node_config} DEFAULT swift_dir
+        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+        iniuncomment ${swift_node_config} DEFAULT devices
+        iniset ${swift_node_config} DEFAULT devices ${node_path}
+
+        iniuncomment ${swift_node_config} DEFAULT log_facility
+        iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+
+        iniuncomment ${swift_node_config} DEFAULT mount_check
+        iniset ${swift_node_config} DEFAULT mount_check false
+
+        iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
+        iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
+    }
+
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)]
+
+        swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)]
+        iniuncomment ${swift_node_config} app:container-server allow_versions
+        iniset ${swift_node_config} app:container-server allow_versions  "true"
+
+        swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf
+        cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
+        generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]
+    done
+
+    swift_log_dir=${SWIFT_DATA_DIR}/logs
+    rm -rf ${swift_log_dir}
+    mkdir -p ${swift_log_dir}/hourly
+    sudo chown -R $USER:adm ${swift_log_dir}
+    sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
+        tee /etc/rsyslog.d/10-swift.conf
+
+}
+
+# configure_swiftclient() - Set config files, create data dirs, etc
+function configure_swiftclient() {
+    setup_develop $SWIFTCLIENT_DIR
+}
+
+# init_swift() - Initialize rings
+function init_swift() {
+    local node_number
+    # Make sure to kill all swift processes first
+    swift-init all stop || true
+
+    # This is where we create three different rings for swift with
+    # different object servers binding on different ports.
+    pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
+
+        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+
+        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+
+        for node_number in ${SWIFT_REPLICAS_SEQ}; do
+            swift-ring-builder object.builder add z${node_number}-127.0.0.1:$[OBJECT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+            swift-ring-builder container.builder add z${node_number}-127.0.0.1:$[CONTAINER_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+            swift-ring-builder account.builder add z${node_number}-127.0.0.1:$[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]/sdb1 1
+        done
+        swift-ring-builder object.builder rebalance
+        swift-ring-builder container.builder rebalance
+        swift-ring-builder account.builder rebalance
+    } && popd >/dev/null
+
+}
+
+function install_swift() {
+    git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+}
+
+function install_swiftclient() {
+    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
+}
+
+
+# start_swift() - Start running processes, including screen
+function start_swift() {
+    # (re)start rsyslog
+    restart_service rsyslog
+    # Start rsync
+    if is_ubuntu; then
+        sudo /etc/init.d/rsync restart || :
+    else
+        sudo systemctl start xinetd.service
+    fi
+
+   # First spawn all the swift services then kill the
+   # proxy service so we can run it in foreground in screen.
+   # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running,
+   # ignore it just in case
+   swift-init all restart || true
+   swift-init proxy stop || true
+   screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+}
+
+# stop_swift() - Stop running processes (non-screen)
+function stop_swift() {
+    # screen normally killed by unstack.sh
+    swift-init all stop || true
+}
+
+# Restore xtrace
+$XTRACE
diff --git a/lib/tempest b/lib/tempest
index 871e9e7..4bfdc50 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -52,7 +52,7 @@
 
     # Tempest doesn't satisfy its dependencies on its own, so
     # install them here instead.
-    sudo pip install -r $TEMPEST_DIR/tools/pip-requires
+    pip_install -r $TEMPEST_DIR/tools/pip-requires
 }
 
 
diff --git a/openrc b/openrc
index 4b6b9b2..08ef98b 100644
--- a/openrc
+++ b/openrc
@@ -72,6 +72,3 @@
 # set log level to DEBUG (helps debug issues)
 # export KEYSTONECLIENT_DEBUG=1
 # export NOVACLIENT_DEBUG=1
-
-# set quantum debug command
-export QUANTUM_TEST_CONFIG_FILE=${QUANTUM_TEST_CONFIG_FILE:-"/etc/quantum/debug.ini"}
diff --git a/stack.sh b/stack.sh
index 5c5ad2a..b38c579 100755
--- a/stack.sh
+++ b/stack.sh
@@ -105,7 +105,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         echo "If you wish to run this script anyway run with FORCE=yes"
@@ -196,8 +196,8 @@
         > /etc/sudoers.d/50_stack_sh )
 
     echo "Copying files to stack user"
-    STACK_DIR="$DEST/${PWD##*/}"
-    cp -r -f -T "$PWD" "$STACK_DIR"
+    STACK_DIR="$DEST/${TOP_DIR##*/}"
+    cp -r -f -T "$TOP_DIR" "$STACK_DIR"
     chown -R stack "$STACK_DIR"
     if [[ "$SHELL_AFTER_RUN" != "no" ]]; then
         exec su -c "set -e; cd $STACK_DIR; bash stack.sh; bash" stack
@@ -310,6 +310,7 @@
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
 source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/ceilometer
 source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/quantum
@@ -319,10 +320,7 @@
 HORIZON_DIR=$DEST/horizon
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
 NOVNC_DIR=$DEST/noVNC
-SWIFT_DIR=$DEST/swift
 SWIFT3_DIR=$DEST/swift3
-SWIFTCLIENT_DIR=$DEST/python-swiftclient
-QUANTUM_DIR=$DEST/quantum
 QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
 
 # Default Quantum Plugin
@@ -503,41 +501,6 @@
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 fi
 
-
-# Swift
-# -----
-
-# TODO: add logging to different location.
-
-# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
-# Default is the common DevStack data directory.
-SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
-
-# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
-# Default is ``/etc/swift``.
-SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
-
-# DevStack will create a loop-back disk formatted as XFS to store the
-# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes.
-# Default is 1 gigabyte.
-SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
-
-# The ring uses a configurable number of bits from a path’s MD5 hash as
-# a partition index that designates a device. The number of bits kept
-# from the hash is known as the partition power, and 2 to the partition
-# power indicates the partition count. Partitioning the full MD5 hash
-# ring allows other parts of the cluster to work in batches of items at
-# once which ends up either more efficient or at least less complex than
-# working with each item separately or the entire cluster all at once.
-# By default we define 9 for the partition count (which mean 512).
-SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
-
-# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
-# configured for your Swift cluster.  By default the three replicas would need a
-# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do
-# only some quick testing.
-SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
-
 if is_service_enabled swift; then
     # If we are using swift3, we can default the s3 port to swift instead
     # of nova-objectstore
@@ -713,14 +676,20 @@
 
 # Install package requirements
 echo_summary "Installing package prerequisites"
-if [[ "$os_PACKAGE" = "deb" ]]; then
+if is_ubuntu; then
     install_package $(get_packages $FILES/apts)
+elif is_suse; then
+    install_package $(get_packages $FILES/rpms-suse)
 else
     install_package $(get_packages $FILES/rpms)
 fi
 
 if [[ $SYSLOG != "False" ]]; then
-    install_package rsyslog-relp
+    if is_suse; then
+        install_package rsyslog-module-relp
+    else
+        install_package rsyslog-relp
+    fi
 fi
 
 if is_service_enabled rabbit; then
@@ -738,7 +707,11 @@
     fi
 elif is_service_enabled zeromq; then
     if [[ "$os_PACKAGE" = "rpm" ]]; then
-        install_package zeromq python-zmq
+        if is_suse; then
+            install_package libzmq1 python-pyzmq
+        else
+            install_package zeromq python-zmq
+        fi
     else
         install_package libzmq1 python-zmq
     fi
@@ -752,7 +725,7 @@
     if is_quantum_ovs_base_plugin "$Q_PLUGIN"; then
         # Install deps
         # FIXME add to ``files/apts/quantum``, but don't install if not needed!
-        if [[ "$os_PACKAGE" = "deb" ]]; then
+        if is_ubuntu; then
             kernel_version=`cat /proc/version | cut -d " " -f3`
             install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
         else
@@ -789,7 +762,6 @@
 install_keystoneclient
 install_glanceclient
 install_novaclient
-
 # Check out the client libs that are used most
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
 
@@ -798,16 +770,16 @@
     # unified auth system (manages accounts/tokens)
     install_keystone
 fi
+
 if is_service_enabled swift; then
-    # storage service
-    git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
-    # storage service client and and Library
-    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
+    install_swiftclient
+    install_swift
     if is_service_enabled swift3; then
         # swift3 middleware to provide S3 emulation to Swift
         git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
     fi
 fi
+
 if is_service_enabled g-api n-api; then
     # image catalog service
     install_glance
@@ -863,11 +835,11 @@
     configure_keystone
 fi
 if is_service_enabled swift; then
-    setup_develop $SWIFT_DIR
-    setup_develop $SWIFTCLIENT_DIR
-fi
-if is_service_enabled swift3; then
-    setup_develop $SWIFT3_DIR
+    configure_swift
+    configure_swiftclient
+    if is_service_enabled swift3; then
+        setup_develop $SWIFT3_DIR
+    fi
 fi
 if is_service_enabled g-api n-api; then
     configure_glance
@@ -986,15 +958,16 @@
     configure_keystone
     init_keystone
     start_keystone
-    echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
-      echo "keystone did not start"
-      exit 1
-    fi
 
-    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
+    # Set up a temporary admin URI for Keystone
     SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
 
+    # Do the keystone-specific bits from keystone_data.sh
+    export OS_SERVICE_TOKEN=$SERVICE_TOKEN
+    export OS_SERVICE_ENDPOINT=$SERVICE_ENDPOINT
+    create_keystone_accounts
+
+    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
     S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
@@ -1007,6 +980,7 @@
     export OS_TENANT_NAME=admin
     export OS_USERNAME=admin
     export OS_PASSWORD=$ADMIN_PASSWORD
+    unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT
 fi
 
 
@@ -1174,14 +1148,7 @@
     iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl
     unset dburl
 
-    Q_CONF_FILE=/etc/quantum/quantum.conf
     cp $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
-    Q_RR_CONF_FILE=/etc/quantum/rootwrap.conf
-    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-        Q_RR_COMMAND="sudo"
-    else
-        Q_RR_COMMAND="sudo $QUANTUM_DIR/bin/quantum-rootwrap $Q_RR_CONF_FILE"
-    fi
     cp -p $QUANTUM_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
 
     # Copy over the config and filter bits
@@ -1364,9 +1331,10 @@
     # Set debug
     iniset $Q_L3_CONF_FILE DEFAULT debug True
 
-    iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP
     iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
 
+    iniset $Q_L3_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+
     iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
 
     quantum_setup_keystone $Q_L3_CONF_FILE DEFAULT set_auth_url
@@ -1387,6 +1355,27 @@
     fi
 fi
 
+#Quantum Metadata
+if is_service_enabled q-meta; then
+    AGENT_META_BINARY="$QUANTUM_DIR/bin/quantum-metadata-agent"
+    Q_META_CONF_FILE=/etc/quantum/metadata_agent.ini
+
+    cp $QUANTUM_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
+
+    # Set verbose
+    iniset $Q_META_CONF_FILE DEFAULT verbose True
+    # Set debug
+    iniset $Q_META_CONF_FILE DEFAULT debug True
+
+    iniset $Q_META_CONF_FILE DEFAULT state_path $DATA_DIR/quantum
+
+    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
+
+    iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
+
+    quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
+fi
+
 # Quantum RPC support - must be updated prior to starting any of the services
 if is_service_enabled quantum; then
     iniset $Q_CONF_FILE DEFAULT control_exchange quantum
@@ -1399,13 +1388,22 @@
         iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD
     fi
     if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
-        Q_DEBUG_CONF_FILE=/etc/quantum/debug.ini
-        cp $QUANTUM_DIR/etc/l3_agent.ini $Q_DEBUG_CONF_FILE
-        iniset $Q_L3_CONF_FILE DEFAULT verbose False
-        iniset $Q_L3_CONF_FILE DEFAULT debug False
-        iniset $Q_L3_CONF_FILE DEFAULT metadata_ip $Q_META_DATA_IP
-        iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
-        iniset $Q_L3_CONF_FILE DEFAULT root_helper "sudo"
+        cp $QUANTUM_DIR/etc/l3_agent.ini $QUANTUM_TEST_CONFIG_FILE
+        iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT verbose False
+        iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT debug False
+        iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
+        quantum_setup_keystone $QUANTUM_TEST_CONFIG_FILE DEFAULT set_auth_url
+        if [[ "$Q_PLUGIN" == "openvswitch" ]]; then
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.OVSInterfaceDriver
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+        elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.BridgeInterfaceDriver
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge ''
+        elif [[ "$Q_PLUGIN" = "ryu" ]]; then
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT interface_driver quantum.agent.linux.interface.RyuInterfaceDriver
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
+            iniset $QUANTUM_TEST_CONFIG_FILE DEFAULT ryu_api_host $RYU_API_HOST:$RYU_API_PORT
+        fi
     fi
 fi
 
@@ -1435,253 +1433,7 @@
 
 if is_service_enabled swift; then
     echo_summary "Configuring Swift"
-
-    # Make sure to kill all swift processes first
-    swift-init all stop || true
-
-    # First do a bit of setup by creating the directories and
-    # changing the permissions so we can run it as our user.
-
-    USER_GROUP=$(id -g)
-    sudo mkdir -p ${SWIFT_DATA_DIR}/drives
-    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
-
-    # Create a loopback disk and format it to XFS.
-    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
-        if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-            sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
-        fi
-    else
-        mkdir -p  ${SWIFT_DATA_DIR}/drives/images
-        sudo touch  ${SWIFT_DATA_DIR}/drives/images/swift.img
-        sudo chown $USER: ${SWIFT_DATA_DIR}/drives/images/swift.img
-
-        dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
-            bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
-    fi
-
-    # Make a fresh XFS filesystem
-    mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
-
-    # Mount the disk with mount options to make it as efficient as possible
-    mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
-    if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
-            ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
-    fi
-
-    # Create a link to the above mount
-    for x in $(seq ${SWIFT_REPLICAS}); do
-        sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done
-
-    # Create all of the directories needed to emulate a few different servers
-    for x in $(seq ${SWIFT_REPLICAS}); do
-            drive=${SWIFT_DATA_DIR}/drives/sdb1/${x}
-            node=${SWIFT_DATA_DIR}/${x}/node
-            node_device=${node}/sdb1
-            [[ -d $node ]] && continue
-            [[ -d $drive ]] && continue
-            sudo install -o ${USER} -g $USER_GROUP -d $drive
-            sudo install -o ${USER} -g $USER_GROUP -d $node_device
-            sudo chown -R $USER: ${node}
-    done
-
-   sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server /var/run/swift
-   sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift
-
-    if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
-        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
-        # Create a symlink if the config dir is moved
-        sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
-    fi
-
-    # Swift use rsync to synchronize between all the different
-    # partitions (which make more sense when you have a multi-node
-    # setup) we configure it with our version of rsync.
-    sed -e "
-        s/%GROUP%/${USER_GROUP}/;
-        s/%USER%/$USER/;
-        s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
-    " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        sudo sed -i '/^RSYNC_ENABLE=false/ { s/false/true/ }' /etc/default/rsync
-    else
-        sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
-    fi
-
-    if is_service_enabled swift3;then
-        swift_auth_server="s3token "
-    fi
-
-    # By default Swift will be installed with the tempauth middleware
-    # which has some default username and password if you have
-    # configured keystone it will checkout the directory.
-    if is_service_enabled key; then
-        swift_auth_server+="authtoken keystoneauth"
-    else
-        swift_auth_server=tempauth
-    fi
-
-    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
-    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
-
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
-
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
-
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
-
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
-
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
-
-    # Only enable Swift3 if we have it enabled in ENABLED_SERVICES
-    is_service_enabled swift3 && swift3=swift3 || swift3=""
-
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server"
-
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
-
-    # Configure Keystone
-    sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
-
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
-    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin"
-
-    if is_service_enabled swift3; then
-        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
-# NOTE(chmou): s3token middleware is not updated yet to use only
-# username and password.
-[filter:s3token]
-paste.filter_factory = keystone.middleware.s3_token:filter_factory
-auth_port = ${KEYSTONE_AUTH_PORT}
-auth_host = ${KEYSTONE_AUTH_HOST}
-auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
-auth_token = ${SERVICE_TOKEN}
-admin_token = ${SERVICE_TOKEN}
-
-[filter:swift3]
-use = egg:swift3#swift3
-EOF
-    fi
-
-    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
-    iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
-
-    # This function generates an object/account/proxy configuration
-    # emulating 4 nodes on different ports
-    function generate_swift_configuration() {
-        local server_type=$1
-        local bind_port=$2
-        local log_facility=$3
-        local node_number
-        local swift_node_config
-
-        for node_number in $(seq ${SWIFT_REPLICAS}); do
-            node_path=${SWIFT_DATA_DIR}/${node_number}
-            swift_node_config=${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf
-
-            cp ${SWIFT_DIR}/etc/${server_type}-server.conf-sample ${swift_node_config}
-
-            iniuncomment ${swift_node_config} DEFAULT user
-            iniset ${swift_node_config} DEFAULT user ${USER}
-
-            iniuncomment ${swift_node_config} DEFAULT bind_port
-            iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
-
-            iniuncomment ${swift_node_config} DEFAULT swift_dir
-            iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
-
-            iniuncomment ${swift_node_config} DEFAULT devices
-            iniset ${swift_node_config} DEFAULT devices ${node_path}
-
-            iniuncomment ${swift_node_config} DEFAULT log_facility
-            iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
-
-            iniuncomment ${swift_node_config} DEFAULT mount_check
-            iniset ${swift_node_config} DEFAULT mount_check false
-
-            iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
-            iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
-
-            bind_port=$(( ${bind_port} + 10 ))
-            log_facility=$(( ${log_facility} + 1 ))
-        done
-    }
-    generate_swift_configuration object 6010 2
-    generate_swift_configuration container 6011 2
-    generate_swift_configuration account 6012 2
-
-    # Specific configuration for swift for rsyslog. See
-    # ``/etc/rsyslog.d/10-swift.conf`` for more info.
-    swift_log_dir=${SWIFT_DATA_DIR}/logs
-    rm -rf ${swift_log_dir}
-    mkdir -p ${swift_log_dir}/hourly
-    sudo chown -R $USER:adm ${swift_log_dir}
-    sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
-        tee /etc/rsyslog.d/10-swift.conf
-    restart_service rsyslog
-
-    # This is where we create three different rings for swift with
-    # different object servers binding on different ports.
-    pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
-
-        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
-
-        port_number=6010
-        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-        for x in $(seq ${SWIFT_REPLICAS}); do
-            swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
-            port_number=$[port_number + 10]
-        done
-        swift-ring-builder object.builder rebalance
-
-        port_number=6011
-        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-        for x in $(seq ${SWIFT_REPLICAS}); do
-            swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
-            port_number=$[port_number + 10]
-        done
-        swift-ring-builder container.builder rebalance
-
-        port_number=6012
-        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-        for x in $(seq ${SWIFT_REPLICAS}); do
-            swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
-            port_number=$[port_number + 10]
-        done
-        swift-ring-builder account.builder rebalance
-
-    } && popd >/dev/null
-
-   # Start rsync
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-        sudo /etc/init.d/rsync restart || :
-    else
-        sudo systemctl start xinetd.service
-    fi
-
-   # First spawn all the swift services then kill the
-   # proxy service so we can run it in foreground in screen.
-   # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running,
-   # ignore it just in case
-   swift-init all restart || true
-   swift-init proxy stop || true
-
-   unset s swift_hash swift_auth_server
+    init_swift
 fi
 
 
@@ -1721,6 +1473,9 @@
         fi
         add_nova_opt "libvirt_vif_driver=$NOVA_VIF_DRIVER"
         add_nova_opt "linuxnet_interface_driver=$LINUXNET_VIF_DRIVER"
+        if is_service_enabled q-meta; then
+            add_nova_opt "service_quantum_metadata_proxy=True"
+        fi
     elif is_service_enabled n-net; then
         add_nova_opt "network_manager=nova.network.manager.$NET_MAN"
         add_nova_opt "public_interface=$PUBLIC_INTERFACE"
@@ -1798,6 +1553,12 @@
 
 # Only run the services specified in ``ENABLED_SERVICES``
 
+# Launch Swift Services
+if is_service_enabled swift; then
+    echo_summary "Starting Swift"
+    start_swift
+fi
+
 # Launch the Glance services
 if is_service_enabled g-api g-reg; then
     echo_summary "Starting Glance"
@@ -1816,7 +1577,7 @@
     add_nova_opt "s3_affix_tenant=True"
 fi
 
-screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver"
+screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
 
 # Launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
@@ -1869,7 +1630,9 @@
             iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
         fi
    fi
-
+   if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
+      setup_quantum
+   fi
 elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
     # Create a small network
     $NOVA_BIN_DIR/nova-manage network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
@@ -1884,6 +1647,7 @@
 # Start up the quantum agents if enabled
 screen_it q-agt "python $AGENT_BINARY --config-file $Q_CONF_FILE --config-file /$Q_PLUGIN_CONF_FILE"
 screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE"
+screen_it q-meta "python $AGENT_META_BINARY --config-file $Q_CONF_FILE --config-file=$Q_META_CONF_FILE"
 screen_it q-l3 "python $AGENT_L3_BINARY --config-file $Q_CONF_FILE --config-file=$Q_L3_CONF_FILE"
 
 if is_service_enabled nova; then
@@ -1901,8 +1665,6 @@
     start_ceilometer
 fi
 
-screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
-
 # Starting the nova-objectstore only if swift3 service is not enabled.
 # Swift will act as s3 objectstore.
 is_service_enabled swift3 || \
@@ -1999,7 +1761,7 @@
 
 # If Keystone is present you can point ``nova`` cli to this server
 if is_service_enabled key; then
-    echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/"
+    echo "Keystone is serving at $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/"
     echo "Examples on using novaclient command line is in exercise.sh"
     echo "The default users are: admin and demo"
     echo "The password: $ADMIN_PASSWORD"
diff --git a/stackrc b/stackrc
index 9588cf9..39d34b0 100644
--- a/stackrc
+++ b/stackrc
@@ -97,7 +97,7 @@
 TEMPEST_BRANCH=master
 
 # heat service
-HEAT_REPO=${GIT_BASE}/heat-api/heat.git
+HEAT_REPO=${GIT_BASE}/openstack/heat.git
 HEAT_BRANCH=master
 
 # python heat client library
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index 9b543ab..298fa9b 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -185,7 +185,7 @@
 # Whitebox testing configuration for Compute...
 COMPUTE_WHITEBOX_ENABLED=True
 COMPUTE_SOURCE_DIR=$NOVA_SOURCE_DIR
-COMPUTE_BIN_DIR=/usr/bin/nova
+COMPUTE_BIN_DIR=$NOVA_BIN_DIR
 COMPUTE_CONFIG_PATH=/etc/nova/nova.conf
 # TODO(jaypipes): Create the key file here... right now, no whitebox
 # tests actually use a key.
@@ -209,6 +209,9 @@
 LIVE_MIGRATION_AVAILABLE=${LIVE_MIGRATION_AVAILABLE:-False}
 USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION=${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
 
+# Object Storage
+OBJECT_CATALOG_TYPE="object-store"
+
 # EC2 and S3 test configuration
 BOTO_EC2_URL="http://$IDENTITY_HOST:8773/services/Cloud"
 BOTO_S3_URL="http://$IDENTITY_HOST:3333"
@@ -281,6 +284,7 @@
     s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g;
     s,%LIVE_MIGRATION_AVAILABLE%,$LIVE_MIGRATION_AVAILABLE,g;
     s,%USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION%,$USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION,g;
+    s,%OBJECT_CATALOG_TYPE%,$OBJECT_CATALOG_TYPE,g;
     s,%BOTO_EC2_URL%,$BOTO_EC2_URL,g;
     s,%BOTO_S3_URL%,$BOTO_S3_URL,g;
     s,%BOTO_AWS_ACCESS%,$BOTO_AWS_ACCESS,g;
diff --git a/tools/get_uec_image.sh b/tools/get_uec_image.sh
index 156fd43..3c62064 100755
--- a/tools/get_uec_image.sh
+++ b/tools/get_uec_image.sh
@@ -65,7 +65,7 @@
 
 case $DIST_NAME in
     quantal)    ;;
-    percise)    ;;
+    precise)    ;;
     oneiric)    ;;
     natty)      ;;
     maverick)   ;;
diff --git a/tools/info.sh b/tools/info.sh
index 5c9a1d3..583a994 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -88,7 +88,7 @@
 # - We are going to check packages only for the services needed.
 # - We are parsing the packages files and detecting metadatas.
 
-if [[ "$os_PACKAGE" = "deb" ]]; then
+if is_ubuntu; then
     PKG_DIR=$FILES/apts
 else
     PKG_DIR=$FILES/rpms
@@ -107,11 +107,7 @@
 # Pips
 # ----
 
-if [[ "$os_PACKAGE" = "deb" ]]; then
-    CMD_PIP=/usr/bin/pip
-else
-    CMD_PIP=/usr/bin/pip-python
-fi
+CMD_PIP=$(get_pip_command)
 
 # Pip tells us what is currently installed
 FREEZE_FILE=$(mktemp --tmpdir freeze.XXXXXX)
diff --git a/unstack.sh b/unstack.sh
index 0040cf1..34195c2 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -27,6 +27,7 @@
 # Get project function libraries
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/swift
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
 # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -36,6 +37,12 @@
     UNSTACK_ALL=${UNSTACK_ALL:-1}
 fi
 
+if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
+    source $TOP_DIR/openrc
+    source $TOP_DIR/lib/quantum
+    teardown_quantum
+fi
+
 # Shut down devstack's screen to get the bulk of OpenStack services in one shot
 SCREEN=$(which screen)
 if [[ -n "$SCREEN" ]]; then
@@ -47,7 +54,7 @@
 
 # Swift runs daemons
 if is_service_enabled swift; then
-    swift-init all stop 2>/dev/null || true
+    stop_swift
 fi
 
 # Apache has the WSGI processes
@@ -64,7 +71,7 @@
         # If tgt driver isn't running this won't work obviously
         # So check the response and restart if need be
         echo "tgtd seems to be in a bad state, restarting..."
-        if [[ "$os_PACKAGE" = "deb" ]]; then
+        if is_ubuntu; then
             restart_service tgt
         else
             restart_service tgtd
@@ -84,7 +91,7 @@
         sudo rm -rf $CINDER_STATE_PATH/volumes/*
     fi
 
-    if [[ "$os_PACKAGE" = "deb" ]]; then
+    if is_ubuntu; then
         stop_service tgt
     else
         stop_service tgtd