Merge "fix syntax error in the if test"
diff --git a/.gitignore b/.gitignore
index c8d2560..83c5419 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 proto
 *~
+.*.sw[nop]
 *.log
 src
 localrc
diff --git a/AUTHORS b/AUTHORS
index f9aa9ea..b5f972f 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -5,6 +5,7 @@
 Armando Migliaccio <armando.migliaccio@eu.citrix.com>
 Brad Hall <brad@nicira.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
+Dan Prince <dprince@redhat.com>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Eddie Hebert <edhebert@gmail.com>
@@ -26,6 +27,7 @@
 Kiall Mac Innes <kiall@managedit.ie>
 Russell Bryant <rbryant@redhat.com>
 Scott Moser <smoser@ubuntu.com>
+Thierry Carrez <thierry@openstack.org>
 Todd Willey <xtoddx@gmail.com>
 Tres Henry <tres@treshenry.net>
 Vishvananda Ishaya <vishvananda@gmail.com>
diff --git a/exerciserc b/exerciserc
index b41714d..82c74b7 100644
--- a/exerciserc
+++ b/exerciserc
@@ -20,3 +20,9 @@
 
 # Max time to wait for a vm to terminate
 export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
+
+# Max time to wait for a euca-volume command to propagate
+export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30}
+
+# Max time to wait for a euca-delete command to propagate
+export VOLUME_DELETE_TIMEOUT=${VOLUME_DELETE_TIMEOUT:-60}
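+# (Both use shell default expansion, so values already exported in the
+# environment take precedence over the fallbacks above.)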
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 76e5202..4a538c6 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -24,6 +24,9 @@
 # Keep track of the current directory
 EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
 TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+VOLUME_ZONE=cinder
+VOLUME_SIZE=1
+ATTACH_DEVICE=/dev/vdc
 
 # Import common functions
 source $TOP_DIR/functions
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 6749558..0f25355 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -33,6 +33,10 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# If neither cinder nor n-vol is enabled, exit with exit code 55, which means
+# the exercise is skipped.
+is_service_enabled cinder n-vol || exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index e54f16c..fb98471 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,6 +1,7 @@
 <VirtualHost *:80>
     WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
     WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR%
+    WSGIApplicationGroup %{GLOBAL}
 
     SetEnv APACHE_RUN_USER %USER%
     SetEnv APACHE_RUN_GROUP %GROUP%
diff --git a/files/apts/cinder b/files/apts/cinder
new file mode 100644
index 0000000..5db06ea
--- /dev/null
+++ b/files/apts/cinder
@@ -0,0 +1,2 @@
+tgt
+lvm2
diff --git a/files/apts/horizon b/files/apts/horizon
index 9b1c9ee..53bddf0 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -1,5 +1,6 @@
 apache2  # NOPRIME
 libapache2-mod-wsgi  # NOPRIME
+python-beautifulsoup
 python-dateutil
 python-paste
 python-pastedeploy
@@ -18,8 +19,5 @@
 python-kombu
 python-coverage
 python-cherrypy3 # why?
-python-django-mailer
-python-django-nose
-python-django-registration
 python-migrate
 nodejs
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 2cdc2fa..7886072 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -9,6 +9,7 @@
 # service              nova      admin, [ResellerAdmin (swift only)]
 # service              quantum   admin        # if enabled
 # service              swift     admin        # if enabled
+# service              cinder    admin        # if enabled
 # demo                 admin     admin
 # demo                 demo      Member, anotherrole
 # invisible_to_admin   demo      Member
@@ -19,8 +20,13 @@
 # SERVICE_TOKEN - aka admin_token in keystone.conf
 # SERVICE_ENDPOINT - local Keystone admin endpoint
 # SERVICE_TENANT_NAME - name of tenant containing service accounts
+# SERVICE_HOST - host used for endpoint creation
 # ENABLED_SERVICES - stack.sh's list of services to start
 # DEVSTACK_DIR - Top-level DevStack directory
+# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation
+
+# Defaults
+# --------
 
 ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
 SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}
@@ -29,10 +35,13 @@
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
 function get_id () {
-    echo `$@ | awk '/ id / { print $4 }'`
+    echo `"$@" | awk '/ id / { print $4 }'`
 }
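+# Note: get_id assumes the CLI prints a table row such as
+# "|   id   |  <uuid>  |", where the value is awk field $4.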
 
+
 # Tenants
+# -------
+
 ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
 SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
 DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
@@ -40,6 +49,8 @@
 
 
 # Users
+# -----
+
 ADMIN_USER=$(get_id keystone user-create --name=admin \
                                          --pass="$ADMIN_PASSWORD" \
                                          --email=admin@example.com)
@@ -49,6 +60,8 @@
 
 
 # Roles
+# -----
+
 ADMIN_ROLE=$(get_id keystone role-create --name=admin)
 KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
 KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
@@ -73,58 +86,201 @@
 keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT
 
 
-# Configure service users/roles
-NOVA_USER=$(get_id keystone user-create --name=nova \
-                                        --pass="$SERVICE_PASSWORD" \
-                                        --tenant_id $SERVICE_TENANT \
-                                        --email=nova@example.com)
-keystone user-role-add --tenant_id $SERVICE_TENANT \
-                       --user_id $NOVA_USER \
-                       --role_id $ADMIN_ROLE
+# Services
+# --------
 
-GLANCE_USER=$(get_id keystone user-create --name=glance \
-                                          --pass="$SERVICE_PASSWORD" \
-                                          --tenant_id $SERVICE_TENANT \
-                                          --email=glance@example.com)
-keystone user-role-add --tenant_id $SERVICE_TENANT \
-                       --user_id $GLANCE_USER \
-                       --role_id $ADMIN_ROLE
+# Keystone
+if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+	KEYSTONE_SERVICE=$(get_id keystone service-create \
+		--name=keystone \
+		--type=identity \
+		--description="Keystone Identity Service")
+	keystone endpoint-create \
+	    --region RegionOne \
+		--service_id $KEYSTONE_SERVICE \
+		--publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \
+		--adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \
+		--internalurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0"
+fi
 
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
-    SWIFT_USER=$(get_id keystone user-create --name=swift \
-                                             --pass="$SERVICE_PASSWORD" \
-                                             --tenant_id $SERVICE_TENANT \
-                                             --email=swift@example.com)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $SWIFT_USER \
-                           --role_id $ADMIN_ROLE
+# Nova
+if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
+    NOVA_USER=$(get_id keystone user-create \
+        --name=nova \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=nova@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $NOVA_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        NOVA_SERVICE=$(get_id keystone service-create \
+            --name=nova \
+            --type=compute \
+            --description="Nova Compute Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $NOVA_SERVICE \
+            --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
+            --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s"
+    fi
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api. The admin role in swift allows a user
     # to act as an admin for their tenant, but ResellerAdmin is needed
     # for a user to act as any tenant. The name of this role is also
     # configurable in swift-proxy.conf
     RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $NOVA_USER \
-                           --role_id $RESELLER_ROLE
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $NOVA_USER \
+        --role_id $RESELLER_ROLE
 fi
 
-if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
-    QUANTUM_USER=$(get_id keystone user-create --name=quantum \
-                                               --pass="$SERVICE_PASSWORD" \
-                                               --tenant_id $SERVICE_TENANT \
-                                               --email=quantum@example.com)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $QUANTUM_USER \
-                           --role_id $ADMIN_ROLE
+# Volume
+if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        VOLUME_SERVICE=$(get_id keystone service-create \
+            --name=volume \
+            --type=volume \
+            --description="Volume Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $VOLUME_SERVICE \
+            --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
+            --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s"
+    fi
+fi
+
+# Glance
+if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
+    GLANCE_USER=$(get_id keystone user-create \
+        --name=glance \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=glance@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $GLANCE_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        GLANCE_SERVICE=$(get_id keystone service-create \
+            --name=glance \
+            --type=image \
+            --description="Glance Image Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $GLANCE_SERVICE \
+            --publicurl "http://$SERVICE_HOST:9292/v1" \
+            --adminurl "http://$SERVICE_HOST:9292/v1" \
+            --internalurl "http://$SERVICE_HOST:9292/v1"
+    fi
+fi
+
+# Swift
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    SWIFT_USER=$(get_id keystone user-create \
+        --name=swift \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=swift@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $SWIFT_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        SWIFT_SERVICE=$(get_id keystone service-create \
+            --name=swift \
+            --type="object-store" \
+            --description="Swift Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $SWIFT_SERVICE \
+            --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:8080/v1" \
+            --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
+    fi
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+    QUANTUM_USER=$(get_id keystone user-create \
+        --name=quantum \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=quantum@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $QUANTUM_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        QUANTUM_SERVICE=$(get_id keystone service-create \
+            --name=quantum \
+            --type=network \
+            --description="Quantum Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $QUANTUM_SERVICE \
+            --publicurl "http://$SERVICE_HOST:9696/" \
+            --adminurl "http://$SERVICE_HOST:9696/" \
+            --internalurl "http://$SERVICE_HOST:9696/"
+    fi
+fi
+
+# EC2
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        EC2_SERVICE=$(get_id keystone service-create \
+            --name=ec2 \
+            --type=ec2 \
+            --description="EC2 Compatibility Layer")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $EC2_SERVICE \
+            --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
+            --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
+            --internalurl "http://$SERVICE_HOST:8773/services/Cloud"
+    fi
+fi
+
+# S3
+if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        S3_SERVICE=$(get_id keystone service-create \
+            --name=s3 \
+            --type=s3 \
+            --description="S3")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $S3_SERVICE \
+            --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+            --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+            --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT"
+    fi
 fi
 
 if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then
     # Tempest has some tests that validate various authorization checks
     # between two regular users in separate tenants
-    ALT_DEMO_TENANT=$(get_id keystone tenant-create --name=alt_demo)
-    ALT_DEMO_USER=$(get_id keystone user-create --name=alt_demo \
-                                        --pass="$ADMIN_PASSWORD" \
-                                        --email=alt_demo@example.com)
-    keystone user-role-add --user $ALT_DEMO_USER --role $MEMBER_ROLE --tenant_id $ALT_DEMO_TENANT
+    ALT_DEMO_TENANT=$(get_id keystone tenant-create \
+        --name=alt_demo)
+    ALT_DEMO_USER=$(get_id keystone user-create \
+        --name=alt_demo \
+        --pass="$ADMIN_PASSWORD" \
+        --email=alt_demo@example.com)
+    keystone user-role-add \
+        --tenant_id $ALT_DEMO_TENANT \
+        --user_id $ALT_DEMO_USER \
+        --role_id $MEMBER_ROLE
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "cinder" ]]; then
+    CINDER_USER=$(get_id keystone user-create --name=cinder \
+                                              --pass="$SERVICE_PASSWORD" \
+                                              --tenant_id $SERVICE_TENANT \
+                                              --email=cinder@example.com)
+    keystone user-role-add --tenant_id $SERVICE_TENANT \
+                           --user_id $CINDER_USER \
+                           --role_id $ADMIN_ROLE
 fi
diff --git a/files/pips/general b/files/pips/general
deleted file mode 100644
index deb2d14..0000000
--- a/files/pips/general
+++ /dev/null
@@ -1 +0,0 @@
-prettytable
diff --git a/files/pips/horizon b/files/pips/horizon
deleted file mode 100644
index 309a5fe..0000000
--- a/files/pips/horizon
+++ /dev/null
@@ -1,6 +0,0 @@
-django>=1.4
-django-mailer    # dist:f16
-django-nose      # dist:f16
-django-nose-selenium
-pycrypto==2.3
-python-cloudfiles
diff --git a/files/pips/keystone b/files/pips/keystone
deleted file mode 100644
index 09636e4..0000000
--- a/files/pips/keystone
+++ /dev/null
@@ -1 +0,0 @@
-PassLib
diff --git a/files/pips/tempest b/files/pips/tempest
deleted file mode 100644
index 6eeb5b9..0000000
--- a/files/pips/tempest
+++ /dev/null
@@ -1,2 +0,0 @@
-pika
-nosexunit  # For use by jenkins in producing reports
diff --git a/files/rpms/cinder b/files/rpms/cinder
new file mode 100644
index 0000000..df861aa
--- /dev/null
+++ b/files/rpms/cinder
@@ -0,0 +1,2 @@
+lvm2
+scsi-target-utils
diff --git a/files/rpms/glance b/files/rpms/glance
index e38f239..eff6c2c 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,5 +1,6 @@
 libxml2-devel
 python-argparse
+python-devel
 python-eventlet
 python-greenlet
 python-paste-deploy
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 3c5fbc1..5e36820 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -5,6 +5,7 @@
 mod_wsgi  # NOPRIME
 pylint
 python-anyjson
+python-BeautifulSoup
 python-boto
 python-coverage
 python-dateutil
diff --git a/files/swift/account-server.conf b/files/swift/account-server.conf
deleted file mode 100644
index 763c306..0000000
--- a/files/swift/account-server.conf
+++ /dev/null
@@ -1,20 +0,0 @@
-[DEFAULT]
-devices = %NODE_PATH%/node
-mount_check = false
-bind_port = %BIND_PORT%
-user = %USER%
-log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_DIR%
-
-[pipeline:main]
-pipeline = account-server
-
-[app:account-server]
-use = egg:swift#account
-
-[account-replicator]
-vm_test_mode = yes
-
-[account-auditor]
-
-[account-reaper]
diff --git a/files/swift/container-server.conf b/files/swift/container-server.conf
deleted file mode 100644
index 106dcab..0000000
--- a/files/swift/container-server.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-[DEFAULT]
-devices = %NODE_PATH%/node
-mount_check = false
-bind_port = %BIND_PORT%
-user = %USER%
-log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_DIR%
-
-[pipeline:main]
-pipeline = container-server
-
-[app:container-server]
-use = egg:swift#container
-
-[container-replicator]
-vm_test_mode = yes
-
-[container-updater]
-
-[container-auditor]
-
-[container-sync]
diff --git a/files/swift/object-server.conf b/files/swift/object-server.conf
deleted file mode 100644
index 7eea67d..0000000
--- a/files/swift/object-server.conf
+++ /dev/null
@@ -1,22 +0,0 @@
-[DEFAULT]
-devices = %NODE_PATH%/node
-mount_check = false
-bind_port = %BIND_PORT%
-user = %USER%
-log_facility = LOG_LOCAL%LOG_FACILITY%
-swift_dir = %SWIFT_CONFIG_DIR%
-
-[pipeline:main]
-pipeline = object-server
-
-[app:object-server]
-use = egg:swift#object
-
-[object-replicator]
-vm_test_mode = yes
-
-[object-updater]
-
-[object-auditor]
-
-[object-expirer]
diff --git a/files/swift/proxy-server.conf b/files/swift/proxy-server.conf
deleted file mode 100644
index 84bf9cd..0000000
--- a/files/swift/proxy-server.conf
+++ /dev/null
@@ -1,57 +0,0 @@
-[DEFAULT]
-bind_port = 8080
-user = %USER%
-swift_dir = %SWIFT_CONFIG_DIR%
-workers = 1
-log_name = swift
-log_facility = LOG_LOCAL1
-log_level = DEBUG
-
-[pipeline:main]
-pipeline = healthcheck cache swift3 %AUTH_SERVER% proxy-server
-
-[app:proxy-server]
-use = egg:swift#proxy
-allow_account_management = true
-account_autocreate = true
-
-[filter:keystone]
-paste.filter_factory = keystone.middleware.swift_auth:filter_factory
-operator_roles = Member,admin
-
-# NOTE(chmou): s3token middleware is not updated yet to use only
-# username and password.
-[filter:s3token]
-paste.filter_factory = keystone.middleware.s3_token:filter_factory
-auth_port = %KEYSTONE_AUTH_PORT%
-auth_host = %KEYSTONE_AUTH_HOST%
-auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_token = %SERVICE_TOKEN%
-admin_token = %SERVICE_TOKEN%
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-auth_host = %KEYSTONE_AUTH_HOST%
-auth_port = %KEYSTONE_AUTH_PORT%
-auth_protocol = %KEYSTONE_AUTH_PROTOCOL%
-auth_uri = %KEYSTONE_SERVICE_PROTOCOL%://%KEYSTONE_SERVICE_HOST%:%KEYSTONE_SERVICE_PORT%/
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USERNAME%
-admin_password = %SERVICE_PASSWORD%
-
-[filter:swift3]
-use = egg:swift3#middleware
-
-[filter:tempauth]
-use = egg:swift#tempauth
-user_admin_admin = admin .admin .reseller_admin
-user_test_tester = testing .admin
-user_test2_tester2 = testing2 .admin
-user_test_tester3 = testing3
-bind_ip = 0.0.0.0
-
-[filter:healthcheck]
-use = egg:swift#healthcheck
-
-[filter:cache]
-use = egg:swift#memcache
diff --git a/files/swift/swift.conf b/files/swift/swift.conf
deleted file mode 100644
index 98df466..0000000
--- a/files/swift/swift.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-[swift-hash]
-# random unique string that can never change (DO NOT LOSE)
-swift_hash_path_suffix = %SWIFT_HASH%
diff --git a/functions b/functions
index 7072fdd..a3e9537 100644
--- a/functions
+++ b/functions
@@ -142,6 +142,8 @@
 # be owned by the installation user, we create the directory and change the
 # ownership to the proper user.
 # Set global RECLONE=yes to simulate a clone when dest-dir exists
+# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
+# does not exist (default is False, meaning the repo will be cloned).
 # git_clone remote dest-dir branch
 function git_clone {
     [[ "$OFFLINE" = "True" ]] && return
@@ -153,6 +155,7 @@
     if echo $GIT_BRANCH | egrep -q "^refs"; then
         # If our branch name is a gerrit style refs/changes/...
         if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1
             git clone $GIT_REMOTE $GIT_DEST
         fi
         cd $GIT_DEST
@@ -160,6 +163,7 @@
     else
         # do a full clone only if the directory doesn't exist
         if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1
             git clone $GIT_REMOTE $GIT_DEST
             cd $GIT_DEST
             # This checkout syntax works for both branches and tags
@@ -184,7 +188,7 @@
 
 
 # Comment an option in an INI file
-# iniset config-file section option
+# inicomment config-file section option
 function inicomment() {
     local file=$1
     local section=$2
@@ -192,6 +196,15 @@
     sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" $file
 }
 
+# Uncomment an option in an INI file
+# iniuncomment config-file section option
+function iniuncomment() {
+    local file=$1
+    local section=$2
+    local option=$3
+    sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" $file
+}
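+# Illustrative usage (the path here is only an example):
+#   iniuncomment /etc/swift/proxy-server.conf DEFAULT bind_port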
+
 
 # Get an option from an INI file
 # iniget config-file section option
@@ -244,6 +257,7 @@
     for service in ${services}; do
         [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
+        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
         [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
     done
@@ -309,6 +323,23 @@
 }
 
 
+# pip install the dependencies of the package before running setup.py develop,
+# so that pip, and not distutils, processes the dependency chain
+# setup_develop directory
+function setup_develop() {
+    (cd $1; \
+        python setup.py egg_info; \
+        raw_links=$(awk '/^.+/ {print "-f " $1}' *.egg-info/dependency_links.txt); \
+        depend_links=$(echo $raw_links | xargs); \
+        pip_install -r *-info/requires.txt $depend_links; \
+        sudo \
+            HTTP_PROXY=$http_proxy \
+            HTTPS_PROXY=$https_proxy \
+            python setup.py develop \
+    )
+}
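+# Typical call, as used further down in stack.sh:  setup_develop $NOVA_DIR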
+
+
 # Service wrapper to start services
 # start_service service-name
 function start_service() {
diff --git a/lib/cinder b/lib/cinder
new file mode 100644
index 0000000..f0715a4
--- /dev/null
+++ b/lib/cinder
@@ -0,0 +1,154 @@
+# lib/cinder
+# Install and start Cinder volume service
+
+# Dependencies:
+# - functions
+# - KEYSTONE_AUTH_* must be defined
+# - SERVICE_{TENANT_NAME|PASSWORD} must be defined
+
+# stack.sh
+# ---------
+# install_XXX
+# configure_XXX
+# init_XXX
+# start_XXX
+# stop_XXX
+# cleanup_XXX
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# set up default directories
+CINDER_DIR=$DEST/cinder
+CINDERCLIENT_DIR=$DEST/python-cinderclient
+CINDER_CONF_DIR=/etc/cinder
+CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
+
+# Name of the lvm volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+
+# cleanup_cinder() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_cinder() {
+    # This function intentionally left blank
+    :
+}
+
+# configure_cinder() - Set config files, create data dirs, etc
+function configure_cinder() {
+    setup_develop $CINDER_DIR
+    setup_develop $CINDERCLIENT_DIR
+
+    if [[ ! -d $CINDER_CONF_DIR ]]; then
+        sudo mkdir -p $CINDER_CONF_DIR
+    fi
+    sudo chown `whoami` $CINDER_CONF_DIR
+
+    cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
+
+    CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
+    cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI
+    iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder
+    iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+
+    cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF
+    iniset $CINDER_CONF DEFAULT auth_strategy keystone
+    iniset $CINDER_CONF DEFAULT verbose True
+    iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
+    iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
+    iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
+    iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8
+    iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST
+    iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+    iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
+}
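+# (With the defaults above, the generated cinder.conf DEFAULT section ends up
+# with, among other host-specific values: auth_strategy=keystone,
+# volume_group=stack-volumes, iscsi_helper=tgtadm.)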
+
+# init_cinder() - Initialize database and volume group
+function init_cinder() {
+    # Force nova volumes off
+    NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
+
+    if is_service_enabled mysql; then
+        # (re)create cinder database
+        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS cinder;'
+        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE cinder;'
+
+        # (re)create cinder database
+        $CINDER_DIR/bin/cinder-manage db sync
+    fi
+
+    if is_service_enabled c-vol; then
+        # Configure a default volume group called ``stack-volumes`` for the volume
+        # service if it does not yet exist.  If you don't wish to use a file backed
+        # volume group, create your own volume group called ``stack-volumes`` before
+        # invoking ``stack.sh``.
+        #
+        # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``.
+
+        if ! sudo vgs $VOLUME_GROUP; then
+            VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+            VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
+            # Only create if the file doesn't already exist
+            [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+            DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
+            # Only create if the loopback device doesn't contain $VOLUME_GROUP
+            if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
+        fi
+
+        if sudo vgs $VOLUME_GROUP; then
+            # Remove iscsi targets
+            sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
+            # Clean out existing volumes
+            for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
+                # VOLUME_NAME_PREFIX prefixes the LVs we want
+                if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
+                    sudo lvremove -f $VOLUME_GROUP/$lv
+                fi
+            done
+        fi
+    fi
+}
+
+# install_cinder() - Collect source and prepare
+function install_cinder() {
+    git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
+    git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
+}
+
+# start_cinder() - Start running processes, including screen
+function start_cinder() {
+    if is_service_enabled c-vol; then
+        if [[ "$os_PACKAGE" = "deb" ]]; then
+            # tgt in oneiric doesn't restart properly if tgtd isn't running
+            # do it in two steps
+            sudo stop tgt || true
+            sudo start tgt
+        else
+            # bypass redirection to systemctl during restart
+            sudo /sbin/service --skip-redirect tgtd restart
+        fi
+    fi
+
+    screen_it c-api "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-api --config-file $CINDER_CONF"
+    screen_it c-vol "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-volume --config-file $CINDER_CONF"
+    screen_it c-sch "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-scheduler --config-file $CINDER_CONF"
+}
+
+# stop_cinder() - Stop running processes (non-screen)
+function stop_cinder() {
+    # FIXME(dtroyer): stop only the cinder screen window?
+
+    if is_service_enabled c-vol; then
+        stop_service tgt
+    fi
+}
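+
+# stack.sh sources this library and calls install_cinder, configure_cinder and
+# init_cinder at the corresponding points of its install/configure/init flow
+# (see the is_service_enabled cinder hooks in stack.sh below).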
diff --git a/stack.sh b/stack.sh
index 776ff86..ade8710 100755
--- a/stack.sh
+++ b/stack.sh
@@ -64,18 +64,23 @@
 # repositories and branches to configure.  ``stackrc`` sources ``localrc`` to
 # allow you to safely override those settings without being overwritten
 # when updating DevStack.
+if [[ ! -r $TOP_DIR/stackrc ]]; then
+    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
+    exit 1
+fi
+source $TOP_DIR/stackrc
 
 # HTTP and HTTPS proxy servers are supported via the usual environment variables
 # ``http_proxy`` and ``https_proxy``.  They can be set in ``localrc`` if necessary
 # or on the command line::
 #
 #     http_proxy=http://proxy.example.com:3128/ ./stack.sh
-
-if [[ ! -r $TOP_DIR/stackrc ]]; then
-    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
-    exit 1
+if [[ -n "$http_proxy" ]]; then
+    export http_proxy=$http_proxy
 fi
-source $TOP_DIR/stackrc
+if [[ -n "$https_proxy" ]]; then
+    export https_proxy=$https_proxy
+fi
 
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
@@ -84,6 +89,16 @@
 # Sanity Check
 # ============
 
+# We look for services prefixed with a "-" in ENABLED_SERVICES and exclude
+# them. For example, to install all the default services except nova-volume
+# (n-vol), set this in your localrc:
+# ENABLED_SERVICES+=",-n-vol"
+for service in ${ENABLED_SERVICES//,/ }; do
+    if [[ ${service} == -* ]]; then
+        ENABLED_SERVICES=$(echo ${ENABLED_SERVICES}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
+    fi
+done
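+# (The sed above strips both the "-n-vol" style marker and the plain service
+# name from the list.)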
+
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
 if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16) ]]; then
@@ -107,6 +122,13 @@
     NOVA_ROOTWRAP=/usr/bin/nova-rootwrap
 fi
 
+# ``stack.sh`` keeps function libraries here
+# Make sure ``$TOP_DIR/lib`` directory is present
+if [ ! -d $TOP_DIR/lib ]; then
+    echo "ERROR: missing devstack/lib - did you grab more than just stack.sh?"
+    exit 1
+fi
+
 # stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external
 # files, along with config templates and other useful files.  You can find these
 # in the ``files`` directory (next to this script).  We will reference this
@@ -125,6 +147,12 @@
     exit 1
 fi
 
+# Make sure we only have one volume service enabled.
+if is_service_enabled cinder && is_service_enabled n-vol; then
+    echo "ERROR: n-vol and cinder must not be enabled at the same time"
+    exit 1
+fi
+
 # OpenStack is designed to be run as a regular user (Horizon will fail to run
 # as root, since apache refused to startup serve content from root user).  If
 # ``stack.sh`` is run as **root**, it automatically creates a **stack** user with
@@ -187,13 +215,6 @@
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
 
-    # Set up the rootwrap sudoers
-    TEMPFILE=`mktemp`
-    echo "$USER ALL=(root) NOPASSWD: $NOVA_ROOTWRAP" >$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
-
     # Remove old file
     sudo rm -f /etc/sudoers.d/stack_sh_nova
 fi
@@ -203,6 +224,24 @@
 # prerequisites and initialize ``$DEST``.
 OFFLINE=`trueorfalse False $OFFLINE`
 
+# Set True to configure ``stack.sh`` to exit with an error code if it is asked
+# to clone any git repositories.  If devstack is used in a testing environment,
+# this may be used to ensure that the correct code is being tested.
+ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
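+# e.g. in a CI job:  ERROR_ON_CLONE=True ./stack.sh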
+
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+sudo chown `whoami` $DATA_DIR
+
+
+# Projects
+# --------
+
+# Get project function libraries
+source $TOP_DIR/lib/cinder
+
+
 # Set the destination directories for openstack projects
 NOVA_DIR=$DEST/nova
 HORIZON_DIR=$DEST/horizon
@@ -215,6 +254,7 @@
 NOVNC_DIR=$DEST/noVNC
 SWIFT_DIR=$DEST/swift
 SWIFT3_DIR=$DEST/swift3
+SWIFTCLIENT_DIR=$DEST/python-swiftclient
 QUANTUM_DIR=$DEST/quantum
 QUANTUM_CLIENT_DIR=$DEST/python-quantumclient
 MELANGE_DIR=$DEST/melange
@@ -235,7 +275,7 @@
 M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
 
 # Name of the lvm volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
@@ -268,6 +308,9 @@
 SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
 SYSLOG_PORT=${SYSLOG_PORT:-516}
 
+# Use color for logging output
+LOG_COLOR=`trueorfalse True $LOG_COLOR`
+
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
@@ -605,6 +648,10 @@
             if [[ ! $file_to_parse =~ glance ]]; then
                 file_to_parse="${file_to_parse} glance"
             fi
+        elif [[ $service == c-* ]]; then
+            if [[ ! $file_to_parse =~ cinder ]]; then
+                file_to_parse="${file_to_parse} cinder"
+            fi
         elif [[ $service == n-* ]]; then
             if [[ ! $file_to_parse =~ nova ]]; then
                 file_to_parse="${file_to_parse} nova"
@@ -632,13 +679,13 @@
                 continue
             fi
 
-            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then # We are using BASH regexp matching feature.
-                        package=${BASH_REMATCH[1]}
-                        distros=${BASH_REMATCH[2]}
-                        for distro in ${distros//,/ }; do  #In bash ${VAR,,} will lowecase VAR
-                            [[ ${distro,,} == ${DISTRO,,} ]] && echo $package
-                        done
-                        continue
+            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
+                # We are using BASH regexp matching feature.
+                package=${BASH_REMATCH[1]}
+                distros=${BASH_REMATCH[2]}
+                # In bash ${VAR,,} will lowercase VAR
+                [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package
+                continue
             fi
 
             echo ${line%#*}
@@ -674,6 +721,9 @@
 if is_service_enabled swift; then
     # storage service
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
+    # storage service client and library
+    git_clone $SWIFTCLIENT_REPO $SWIFTCLIENT_DIR $SWIFTCLIENT_BRANCH
+    # swift3 middleware to provide S3 emulation to Swift
     git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
 fi
 if is_service_enabled g-api n-api; then
@@ -699,10 +749,12 @@
     # melange
     git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH
 fi
-
 if is_service_enabled melange; then
     git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH
 fi
+if is_service_enabled cinder; then
+    install_cinder
+fi
 
 
 # Initialization
@@ -710,38 +762,41 @@
 
 # setup our checkouts so they are installed into python path
 # allowing ``import nova`` or ``import glance.client``
-cd $KEYSTONECLIENT_DIR; sudo python setup.py develop
-cd $NOVACLIENT_DIR; sudo python setup.py develop
-cd $OPENSTACKCLIENT_DIR; sudo python setup.py develop
+setup_develop $KEYSTONECLIENT_DIR
+setup_develop $NOVACLIENT_DIR
+setup_develop $OPENSTACKCLIENT_DIR
 if is_service_enabled key g-api n-api swift; then
-    cd $KEYSTONE_DIR; sudo python setup.py develop
+    setup_develop $KEYSTONE_DIR
 fi
 if is_service_enabled swift; then
-    cd $SWIFT_DIR; sudo python setup.py develop
-    cd $SWIFT3_DIR; sudo python setup.py develop
+    setup_develop $SWIFT_DIR
+    setup_develop $SWIFTCLIENT_DIR
+    setup_develop $SWIFT3_DIR
 fi
 if is_service_enabled g-api n-api; then
-    cd $GLANCE_DIR; sudo python setup.py develop
-fi
-cd $NOVA_DIR; sudo python setup.py develop
-if is_service_enabled horizon; then
-    cd $HORIZON_DIR; sudo python setup.py develop
-fi
-if is_service_enabled quantum; then
-    cd $QUANTUM_CLIENT_DIR; sudo python setup.py develop
-fi
-if is_service_enabled quantum; then
-    cd $QUANTUM_DIR; sudo python setup.py develop
-fi
-if is_service_enabled m-svc; then
-    cd $MELANGE_DIR; sudo python setup.py develop
-fi
-if is_service_enabled melange; then
-    cd $MELANGECLIENT_DIR; sudo python setup.py develop
+    setup_develop $GLANCE_DIR
 fi
 
 # Do this _after_ glance is installed to override the old binary
-cd $GLANCECLIENT_DIR; sudo python setup.py develop
+setup_develop $GLANCECLIENT_DIR
+
+setup_develop $NOVA_DIR
+if is_service_enabled horizon; then
+    setup_develop $HORIZON_DIR
+fi
+if is_service_enabled quantum; then
+    setup_develop $QUANTUM_CLIENT_DIR
+    setup_develop $QUANTUM_DIR
+fi
+if is_service_enabled m-svc; then
+    setup_develop $MELANGE_DIR
+fi
+if is_service_enabled melange; then
+    setup_develop $MELANGECLIENT_DIR
+fi
+if is_service_enabled cinder; then
+    configure_cinder
+fi
 
 
 # Syslog
@@ -1029,6 +1084,9 @@
 
     GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
     cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
+
+    $GLANCE_DIR/bin/glance-manage db_sync
+
 fi
 
 # Quantum (for controller or agent nodes)
@@ -1074,23 +1132,28 @@
             echo "OVS 1.4+ is required for tunneling between multiple hosts."
             exit 1
         fi
-        sudo sed -i -e "s/.*enable-tunneling = .*$/enable-tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
+        sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
     fi
 fi
 
 # Quantum service (for controller node)
 if is_service_enabled q-svc; then
-    Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini
     Q_CONF_FILE=/etc/quantum/quantum.conf
-    # must remove this file from existing location, otherwise Quantum will prefer it
-    if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then
-        sudo mv $QUANTUM_DIR/etc/plugins.ini $Q_PLUGIN_INI_FILE
-    fi
+    Q_API_PASTE_FILE=/etc/quantum/api-paste.ini
+    Q_POLICY_FILE=/etc/quantum/policy.json
 
     if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then
       sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
     fi
 
+    if [[ -e $QUANTUM_DIR/etc/api-paste.ini ]]; then
+      sudo mv $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+    fi
+
+    if [[ -e $QUANTUM_DIR/etc/policy.json ]]; then
+      sudo mv $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
+    fi
+
     if is_service_enabled mysql; then
             mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;"
             mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;"
@@ -1098,9 +1161,10 @@
             echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
             exit 1
     fi
-    sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE
 
-    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $Q_CONF_FILE"
+    # Update either configuration file with plugin
+    sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE
+    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE"
 fi
 
 # Quantum agent (for compute nodes)
@@ -1124,7 +1188,7 @@
         sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
         sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
         sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-        sudo sed -i -e "s/.*local-ip = .*/local-ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE
+        sudo sed -i -e "s/.*local_ip = .*/local_ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE
         AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py
     elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
        # Start up the quantum <-> linuxbridge agent
@@ -1174,6 +1238,36 @@
 
 cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR
 
+# If Nova ships the new rootwrap filter files, deploy them
+# (owned by root) and add a parameter to $NOVA_ROOTWRAP
+ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP"
+if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then
+    # Wipe any existing rootwrap.d files first
+    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
+        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
+    fi
+    # Deploy filters to /etc/nova/rootwrap.d
+    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
+    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
+    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
+    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
+    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
+    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
+    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
+    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
+    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
+    # Specify rootwrap.conf as first parameter to nova-rootwrap
+    NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf"
+    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *"
+fi
+
+# Set up the rootwrap sudoers
+TEMPFILE=`mktemp`
+echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+chmod 0440 $TEMPFILE
+sudo chown root:root $TEMPFILE
+sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
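+# (On a default install the resulting entry looks roughly like:
+#  stack ALL=(root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *)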
+
 if is_service_enabled n-api; then
     # Use the sample http middleware configuration supplied in the
     # Nova sources.  This paste config adds the configuration required
@@ -1356,6 +1450,9 @@
     # Install memcached for swift.
     install_package memcached
 
+    # We make sure to kill all swift processes first
+    pkill -f -9 swift-
+
     # We first do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
@@ -1428,34 +1525,69 @@
         sudo sed -i '/disable *= *yes/ { s/yes/no/ }' /etc/xinetd.d/rsync
     fi
 
-   # By default Swift will be installed with the tempauth middleware
-   # which has some default username and password if you have
-   # configured keystone it will checkout the directory.
-   if is_service_enabled key; then
-       swift_auth_server="s3token authtoken keystone"
-   else
-       swift_auth_server=tempauth
-   fi
+    # By default Swift will be installed with the tempauth middleware
+    # which has some default username and password if you have
+    # configured keystone it will checkout the directory.
+    if is_service_enabled key; then
+        swift_auth_server="s3token authtoken keystone"
+    else
+        swift_auth_server=tempauth
+    fi
 
-   # We do the install of the proxy-server and swift configuration
-   # replacing a few directives to match our configuration.
-   sed -e "
-       s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},g;
-       s,%USER%,$USER,g;
-       s,%SERVICE_TENANT_NAME%,$SERVICE_TENANT_NAME,g;
-       s,%SERVICE_USERNAME%,swift,g;
-       s,%SERVICE_PASSWORD%,$SERVICE_PASSWORD,g;
-       s,%KEYSTONE_SERVICE_PROTOCOL%,$KEYSTONE_SERVICE_PROTOCOL,g;
-       s,%SERVICE_TOKEN%,${SERVICE_TOKEN},g;
-       s,%KEYSTONE_API_PORT%,${KEYSTONE_API_PORT},g;
-       s,%KEYSTONE_AUTH_HOST%,${KEYSTONE_AUTH_HOST},g;
-       s,%KEYSTONE_AUTH_PORT%,${KEYSTONE_AUTH_PORT},g;
-       s,%KEYSTONE_AUTH_PROTOCOL%,${KEYSTONE_AUTH_PROTOCOL},g;
-       s/%AUTH_SERVER%/${swift_auth_server}/g;
-    " $FILES/swift/proxy-server.conf | \
-       sudo tee ${SWIFT_CONFIG_DIR}/proxy-server.conf
+    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
+    cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
 
-    sed -e "s/%SWIFT_HASH%/$SWIFT_HASH/" $FILES/swift/swift.conf > ${SWIFT_CONFIG_DIR}/swift.conf
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG
+
+    iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit swift3 ${swift_auth_server} proxy-logging proxy-server"
+
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
+
+    cat <<EOF>>${SWIFT_CONFIG_PROXY_SERVER}
+
+[filter:keystone]
+paste.filter_factory = keystone.middleware.swift_auth:filter_factory
+operator_roles = Member,admin
+
+# NOTE(chmou): s3token middleware is not updated yet to use only
+# username and password.
+[filter:s3token]
+paste.filter_factory = keystone.middleware.s3_token:filter_factory
+auth_port = ${KEYSTONE_AUTH_PORT}
+auth_host = ${KEYSTONE_AUTH_HOST}
+auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+auth_token = ${SERVICE_TOKEN}
+admin_token = ${SERVICE_TOKEN}
+
+[filter:authtoken]
+paste.filter_factory = keystone.middleware.auth_token:filter_factory
+auth_host = ${KEYSTONE_AUTH_HOST}
+auth_port = ${KEYSTONE_AUTH_PORT}
+auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+auth_uri = ${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/
+admin_tenant_name = ${SERVICE_TENANT_NAME}
+admin_user = swift
+admin_password = ${SERVICE_PASSWORD}
+
+[filter:swift3]
+use = egg:swift3#swift3
+EOF
+
+    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
+    iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
 
     # We need to generate a object/account/proxy configuration
     # emulating 4 nodes on different ports we have a little function
@@ -1465,16 +1597,35 @@
         local bind_port=$2
         local log_facility=$3
         local node_number
+        local swift_node_config
 
         for node_number in $(seq ${SWIFT_REPLICAS}); do
             node_path=${SWIFT_DATA_DIR}/${node_number}
-            sed -e "
-                s,%SWIFT_CONFIG_DIR%,${SWIFT_CONFIG_DIR},;
-                s,%USER%,$USER,;
-                s,%NODE_PATH%,${node_path},;
-                s,%BIND_PORT%,${bind_port},;
-                s,%LOG_FACILITY%,${log_facility},
-            " $FILES/swift/${server_type}-server.conf > ${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf
+            swift_node_config=${SWIFT_CONFIG_DIR}/${server_type}-server/${node_number}.conf
+
+            cp ${SWIFT_DIR}/etc/${server_type}-server.conf-sample ${swift_node_config}
+
+            iniuncomment ${swift_node_config} DEFAULT user
+            iniset ${swift_node_config} DEFAULT user ${USER}
+
+            iniuncomment ${swift_node_config} DEFAULT bind_port
+            iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
+
+            iniuncomment ${swift_node_config} DEFAULT swift_dir
+            iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+
+            iniuncomment ${swift_node_config} DEFAULT devices
+            iniset ${swift_node_config} DEFAULT devices ${node_path}
+
+            iniuncomment ${swift_node_config} DEFAULT log_facility
+            iniset ${swift_node_config} DEFAULT log_facility LOG_LOCAL${log_facility}
+
+            iniuncomment ${swift_node_config} DEFAULT mount_check
+            iniset ${swift_node_config} DEFAULT mount_check false
+
+            iniuncomment ${swift_node_config} ${server_type}-replicator vm_test_mode
+            iniset ${swift_node_config} ${server_type}-replicator vm_test_mode yes
+
             bind_port=$(( ${bind_port} + 10 ))
             log_facility=$(( ${log_facility} + 1 ))
         done
@@ -1483,48 +1634,47 @@
     generate_swift_configuration container 6011 2
     generate_swift_configuration account 6012 2
 
+    # We have some specific configuration for swift for rsyslog. See
+    # the file /etc/rsyslog.d/10-swift.conf for more info.
+    swift_log_dir=${SWIFT_DATA_DIR}/logs
+    rm -rf ${swift_log_dir}
+    mkdir -p ${swift_log_dir}/hourly
+    sudo chown -R $USER:adm ${swift_log_dir}
+    sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
+        tee /etc/rsyslog.d/10-swift.conf
+    restart_service rsyslog
 
-   # We have some specific configuration for swift for rsyslog. See
-   # the file /etc/rsyslog.d/10-swift.conf for more info.
-   swift_log_dir=${SWIFT_DATA_DIR}/logs
-   rm -rf ${swift_log_dir}
-   mkdir -p ${swift_log_dir}/hourly
-   sudo chown -R $USER:adm ${swift_log_dir}
-   sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
-       tee /etc/rsyslog.d/10-swift.conf
-   restart_service rsyslog
+    # This is where we create three different rings for swift with
+    # different object servers binding on different ports.
+    pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
 
-   # This is where we create three different rings for swift with
-   # different object servers binding on different ports.
-   pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
+        rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
 
-       rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
+        port_number=6010
+        swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        for x in $(seq ${SWIFT_REPLICAS}); do
+            swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
+            port_number=$[port_number + 10]
+        done
+        swift-ring-builder object.builder rebalance
 
-       port_number=6010
-       swift-ring-builder object.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-       for x in $(seq ${SWIFT_REPLICAS}); do
-           swift-ring-builder object.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
-           port_number=$[port_number + 10]
-       done
-       swift-ring-builder object.builder rebalance
+        port_number=6011
+        swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        for x in $(seq ${SWIFT_REPLICAS}); do
+            swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
+            port_number=$[port_number + 10]
+        done
+        swift-ring-builder container.builder rebalance
 
-       port_number=6011
-       swift-ring-builder container.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-       for x in $(seq ${SWIFT_REPLICAS}); do
-           swift-ring-builder container.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
-           port_number=$[port_number + 10]
-       done
-       swift-ring-builder container.builder rebalance
+        port_number=6012
+        swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
+        for x in $(seq ${SWIFT_REPLICAS}); do
+            swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
+            port_number=$[port_number + 10]
+        done
+        swift-ring-builder account.builder rebalance
 
-       port_number=6012
-       swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1
-       for x in $(seq ${SWIFT_REPLICAS}); do
-           swift-ring-builder account.builder add z${x}-127.0.0.1:${port_number}/sdb1 1
-           port_number=$[port_number + 10]
-       done
-       swift-ring-builder account.builder rebalance
-
-   } && popd >/dev/null
+    } && popd >/dev/null
 
    # We then can start rsync.
     if [[ "$os_PACKAGE" = "deb" ]]; then
@@ -1547,17 +1697,18 @@
 # Volume Service
 # --------------
 
-if is_service_enabled n-vol; then
-    #
-    # Configure a default volume group called 'nova-volumes' for the nova-volume
+if is_service_enabled cinder; then
+    init_cinder
+elif is_service_enabled n-vol; then
+    # Configure a default volume group called ``stack-volumes`` for the volume
     # service if it does not yet exist.  If you don't wish to use a file backed
-    # volume group, create your own volume group called 'nova-volumes' before
-    # invoking stack.sh.
+    # volume group, create your own volume group called ``stack-volumes`` before
+    # invoking ``stack.sh``.
     #
-    # By default, the backing file is 2G in size, and is stored in /opt/stack.
+    # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``.
 
     if ! sudo vgs $VOLUME_GROUP; then
-        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
         # Only create if the file doesn't already exists
         [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
@@ -1694,7 +1845,21 @@
 if [ "$API_RATE_LIMIT" != "True" ]; then
     add_nova_opt "api_rate_limit=False"
 fi
+if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+    # Add color to logging output
+    add_nova_opt "logging_context_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s"
+    add_nova_opt "logging_default_format_string=%(asctime)s %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
+    add_nova_opt "logging_debug_format_suffix=from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
+    add_nova_opt "logging_exception_prefix=%(color)s%(asctime)s TRACE %(name)s %(instance)s"
+else
+    # Show user_name and project_name instead of user_id and project_id
+    add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
+fi
 
+# If cinder is enabled, use the cinder volume driver
+if is_service_enabled cinder; then
+    add_nova_opt "volume_api_class=nova.volume.cinder.API"
+fi
 
 # Provide some transition from EXTRA_FLAGS to EXTRA_OPTS
 if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
@@ -1776,7 +1941,7 @@
 
     KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
     KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
-    KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
+    KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template}
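+    # Set KEYSTONE_CATALOG_BACKEND=sql (e.g. in localrc) to create the service
+    # catalog in the database via keystone_data.sh; the default keeps the
+    # template catalog configured below.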
 
     if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
         sudo mkdir -p $KEYSTONE_CONF_DIR
@@ -1787,41 +1952,49 @@
         cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
         cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
     fi
-    cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
 
     # Rewrite stock keystone.conf:
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
     iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
-    # Configure keystone.conf to use templates
-    iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
-    iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
     sed -e "
         /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
     " -i $KEYSTONE_CONF
     # Append the S3 bits
     iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
 
-    # Add swift endpoints to service catalog if swift is enabled
-    if is_service_enabled swift; then
-        echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
-    fi
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
+        # Configure keystone.conf to use sql
+        iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
+        inicomment $KEYSTONE_CONF catalog template_file
+    else
+        KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
+        cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+        # Add swift endpoints to service catalog if swift is enabled
+        if is_service_enabled swift; then
+            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
+        fi
 
-    # Add quantum endpoints to service catalog if quantum is enabled
-    if is_service_enabled quantum; then
-        echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
-    fi
+        # Add quantum endpoints to service catalog if quantum is enabled
+        if is_service_enabled quantum; then
+            echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
+        fi
 
-    sudo sed -e "
-        s,%SERVICE_HOST%,$SERVICE_HOST,g;
-        s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
-    " -i $KEYSTONE_CATALOG
+        sudo sed -e "
+            s,%SERVICE_HOST%,$SERVICE_HOST,g;
+            s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
+        " -i $KEYSTONE_CATALOG
+
+        # Configure keystone.conf to use templates
+        iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
+        iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+    fi
 
     # Set up logging
     LOGGING_ROOT="devel"
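With the catalog backend now selectable, switching to the SQL backend should only require setting the variable before running stack.sh; with the default template backend the catalog remains a flat key=value file like the swift and quantum entries appended above. The identity lines below are illustrative, not copied from files/default_catalog.templates.

    # localrc
    KEYSTONE_CATALOG_BACKEND=sql

    # Template-backend entries (default_catalog.templates) follow this shape:
    #   catalog.RegionOne.identity.publicURL = http://%SERVICE_HOST%:5000/v2.0
    #   catalog.RegionOne.identity.name = Identity Service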
@@ -1833,25 +2006,31 @@
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
 
-    # initialize keystone database
+    # Set up the keystone database
     $KEYSTONE_DIR/bin/keystone-manage db_sync
 
     # launch keystone and wait for it to answer before continuing
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q '200 OK'; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then
       echo "keystone did not start"
       exit 1
     fi
 
     # keystone_data.sh creates services, admin and demo users, and roles.
     SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
+
+    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
+    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
+    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
         bash $FILES/keystone_data.sh
 
     # create an access key and secret key for nova ec2 register image
     if is_service_enabled swift && is_service_enabled nova; then
-        CREDS=$(keystone --os_auth_url=$SERVICE_ENDPOINT --os_username=nova --os_password=$SERVICE_PASSWORD --os_tenant_name=$SERVICE_TENANT_NAME ec2-credentials-create)
+        NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+        NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
+        CREDS=$(keystone ec2-credentials-create --user $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
         ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
         SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
         add_nova_opt "s3_access_key=$ACCESS_KEY"
@@ -1862,6 +2041,7 @@
 
 # launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
+    add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
     screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
     echo "Waiting for nova-api to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
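This is the same wait-until-it-answers pattern used for keystone above, just inverted: keystone now loops while the connection is still refused, so any HTTP reply counts as started, whereas nova-api loops until wget succeeds outright. As a standalone sketch with a placeholder endpoint:

    SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}    # seconds before giving up
    URL=http://127.0.0.1:8774                 # placeholder endpoint

    # Poll until the service answers; timeout(1) aborts the loop if it never does.
    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- $URL; do sleep 1; done"; then
        echo "service at $URL did not start"
        exit 1
    fi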
@@ -1897,6 +2077,9 @@
 screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ."
 screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
 screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
+if is_service_enabled cinder; then
+    start_cinder
+fi
 screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
 screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
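screen_it, used for every service launched above, is a devstack helper as well. The sketch below is an assumed minimal version, open a titled window in the shared "stack" screen session and type the command into it, included only to make the launch lines readable; the session name and the stuff invocation are assumptions, not the helper's real body.

    SCREEN_NAME=${SCREEN_NAME:-stack}

    screen_it() {
        # $1 = service tag, $2 = command line to run in that window (assumed contract).
        # is_service_enabled is the same devstack predicate used throughout stack.sh.
        if is_service_enabled $1; then
            NL=$(printf '\r')
            screen -S $SCREEN_NAME -X screen -t $1            # open a new titled window
            screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL"     # type the command plus Enter
        fi
    }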
 
@@ -1927,7 +2110,7 @@
 
     ADMIN_USER=admin
     ADMIN_TENANT=admin
-    TOKEN=`curl -s -d  "{\"auth\":{\"passwordCredentials\": {\"username\": \"$ADMIN_USER\", \"password\": \"$ADMIN_PASSWORD\"}, \"tenantName\": \"$ADMIN_TENANT\"}}" -H "Content-type: application/json" http://$HOST_IP:5000/v2.0/tokens | python -c "import sys; import json; tok = json.loads(sys.stdin.read()); print tok['access']['token']['id'];"`
+    TOKEN=$(keystone --os_tenant_name $ADMIN_TENANT --os_username $ADMIN_USER --os_password $ADMIN_PASSWORD --os_auth_url http://$HOST_IP:5000/v2.0 token-get | grep ' id ' | get_field 2)
 
     # Option to upload legacy ami-tty, which works with xenserver
     if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
@@ -1937,7 +2120,7 @@
     for image_url in ${IMAGE_URLS//,/ }; do
         # Downloads the image (uec ami+aki style), then extracts it.
         IMAGE_FNAME=`basename "$image_url"`
-        if [ ! -f $FILES/$IMAGE_FNAME ]; then
+        if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
             wget -c $image_url -O $FILES/$IMAGE_FNAME
         fi
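The extra stat test covers the case where an earlier run was interrupted and left a zero-byte image behind: a plain -f check would accept the empty file and everything downstream would operate on an empty tarball. Standalone, with a placeholder image name, the guard reads:

    IMAGE=cirros-0.3.0-x86_64-uec.tar.gz        # placeholder file name
    IMAGE_URL=http://launchpad.net/cirros/trunk/0.3.0/+download/$IMAGE

    # Re-fetch when the file is missing or empty; wget -c resumes partial downloads.
    if [[ ! -f $IMAGE || "$(stat -c %s $IMAGE)" = "0" ]]; then
        wget -c $IMAGE_URL -O $IMAGE
    fi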
 
diff --git a/stackrc b/stackrc
index 98e6bd4..3a19cdb 100644
--- a/stackrc
+++ b/stackrc
@@ -6,9 +6,25 @@
 # by default you can append them in your ENABLED_SERVICES variable in
 # your localrc. For example for swift you can just add this in your
 # localrc to add it with the other services:
-# ENABLED_SERVICES="$ENABLED_SERVICES,swift"
+# ENABLED_SERVICES+=,swift
+#
+# If you would like to explicitly remove a service, add "-$service" to
+# ENABLED_SERVICES. For example, to install all of the defaults except
+# nova-volume, set this in your localrc:
+# ENABLED_SERVICES+=,-n-vol
 ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit
 
+# Set the default Nova APIs to enable
+NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata
+
+# volume service
+CINDER_REPO=https://github.com/openstack/cinder
+CINDER_BRANCH=master
+
+# volume client
+CINDERCLIENT_REPO=https://github.com/openstack/python-cinderclient
+CINDERCLIENT_BRANCH=master
+
 # compute service
 NOVA_REPO=https://github.com/openstack/nova.git
 NOVA_BRANCH=master
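Tying the new stackrc entries together, a localrc that trades nova-volume for cinder could look like the following; the bare "cinder" service tag is inferred from the is_service_enabled cinder checks earlier in this change, and the branch override is just an illustration.

    # localrc
    ENABLED_SERVICES+=,-n-vol        # drop nova-volume ...
    ENABLED_SERVICES+=,cinder        # ... and enable the cinder volume service
    CINDER_BRANCH=master             # or point at a branch under test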
@@ -20,6 +36,10 @@
 SWIFT3_BRANCH=master
 
 
+# python swift client library
+SWIFTCLIENT_REPO=https://github.com/openstack/python-swiftclient
+SWIFTCLIENT_BRANCH=master
+
 # image catalog service
 GLANCE_REPO=https://github.com/openstack/glance.git
 GLANCE_BRANCH=master
@@ -89,8 +109,10 @@
 #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
 case "$LIBVIRT_TYPE" in
     lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
+        DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-rootfs
         IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz";;
     *)  # otherwise, use the uec style image (with kernel, ramdisk, disk)
+        DEFAULT_IMAGE_NAME=cirros-0.3.0-x86_64-uec
         IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz";;
 esac
 
diff --git a/tests/functions.sh b/tests/functions.sh
index e7fbe0c..e436ed9 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -141,3 +141,5 @@
 else
     echo "inicomment failed: $VAL"
 fi
+
+rm test.ini
diff --git a/tools/rfc.sh b/tools/rfc.sh
deleted file mode 100755
index d4dc597..0000000
--- a/tools/rfc.sh
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/bin/sh -e
-# Copyright (c) 2010-2011 Gluster, Inc. <http://www.gluster.com>
-# This initial version of this file was taken from the source tree
-# of GlusterFS. It was not directly attributed, but is assumed to be
-# Copyright (c) 2010-2011 Gluster, Inc and release GPLv3
-# Subsequent modifications are Copyright (c) 2011 OpenStack, LLC.
-#
-# GlusterFS is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published
-# by the Free Software Foundation; either version 3 of the License,
-# or (at your option) any later version.
-#
-# GlusterFS is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program.  If not, see
-# <http://www.gnu.org/licenses/>.
-
-
-branch="master";
-
-set_hooks_commit_msg()
-{
-    top_dir=`git rev-parse --show-toplevel`
-    f="${top_dir}/.git/hooks/commit-msg";
-    u="https://review.openstack.org/tools/hooks/commit-msg";
-
-    if [ -x "$f" ]; then
-        return;
-    fi
-
-    curl -o $f $u || wget -O $f $u;
-
-    chmod +x $f;
-
-    GIT_EDITOR=true git commit --amend
-}
-
-add_remote()
-{
-    username=$1
-    project=$2
-
-    echo "No remote set, testing ssh://$username@review.openstack.org:29418"
-    if project_list=`ssh -p29418 -o StrictHostKeyChecking=no $username@review.openstack.org gerrit ls-projects 2>/dev/null`
-    then
-        echo "$username@review.openstack.org:29418 worked."
-        if echo $project_list | grep $project >/dev/null
-        then
-            echo "Creating a git remote called gerrit that maps to:"
-            echo "  ssh://$username@review.openstack.org:29418/$project"
-            git remote add gerrit ssh://$username@review.openstack.org:29418/$project
-        else
-            echo "The current project name, $project, is not a known project."
-            echo "Please either reclone from github/gerrit or create a"
-            echo "remote named gerrit that points to the intended project."
-            return 1
-        fi
-
-        return 0
-    fi
-    return 1
-}
-
-check_remote()
-{
-    if ! git remote | grep gerrit >/dev/null 2>&1
-    then
-        origin_project=`git remote show origin | grep 'Fetch URL' | perl -nle '@fields = split(m|[:/]|); $len = $#fields; print $fields[$len-1], "/", $fields[$len];'`
-        if add_remote $USERNAME $origin_project
-        then
-            return 0
-        else
-            echo "Your local name doesn't work on Gerrit."
-            echo -n "Enter Gerrit username (same as launchpad): "
-            read gerrit_user
-            if add_remote $gerrit_user $origin_project
-            then
-                return 0
-            else
-                echo "Can't infer where gerrit is - please set a remote named"
-                echo "gerrit manually and then try again."
-                echo
-                echo "For more information, please see:"
-                echo "\thttp://wiki.openstack.org/GerritWorkflow"
-                exit 1
-            fi
-        fi
-    fi
-}
-
-rebase_changes()
-{
-    git fetch;
-
-    GIT_EDITOR=true git rebase -i origin/$branch || exit $?;
-}
-
-
-assert_diverge()
-{
-    if ! git diff origin/$branch..HEAD | grep -q .
-    then
-	echo "No changes between the current branch and origin/$branch."
-	exit 1
-    fi
-}
-
-
-main()
-{
-    set_hooks_commit_msg;
-
-    check_remote;
-
-    rebase_changes;
-
-    assert_diverge;
-
-    bug=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]ug|[Ll][Pp])\s*[#:]?\s*(\d+)/) {print "$2"; exit}')
-
-    bp=$(git show --format='%s %b' | perl -nle 'if (/\b([Bb]lue[Pp]rint|[Bb][Pp])\s*[#:]?\s*([0-9a-zA-Z-_]+)/) {print "$2"; exit}')
-
-    if [ "$DRY_RUN" = 1 ]; then
-        drier='echo -e Please use the following command to send your commits to review:\n\n'
-    else
-        drier=
-    fi
-
-    local_branch=`git branch | grep -Ei "\* (.*)" | cut -f2 -d' '`
-    if [ -z "$bug" ]; then
-	if [ -z "$bp" ]; then
-            $drier git push gerrit HEAD:refs/for/$branch/$local_branch;
-	else
-	    $drier git push gerrit HEAD:refs/for/$branch/bp/$bp;
-	fi
-    else
-        $drier git push gerrit HEAD:refs/for/$branch/bug/$bug;
-    fi
-}
-
-main "$@"
diff --git a/unstack.sh b/unstack.sh
index 7de0d74..879f842 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -25,9 +25,12 @@
 fi
 
 # Shut down devstack's screen to get the bulk of OpenStack services in one shot
-SESSION=$(screen -ls | grep "[0-9].stack" | awk '{ print $1 }')
-if [[ -n "$SESSION" ]]; then
-    screen -X -S $SESSION quit
+SCREEN=$(which screen)
+if [[ -n "$SCREEN" ]]; then
+    SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
+    if [[ -n "$SESSION" ]]; then
+        screen -X -S $SESSION quit
+    fi
 fi
 
 # Swift runs daemons
@@ -41,7 +44,7 @@
 fi
 
 # Get the iSCSI volumes
-if is_service_enabled n-vol; then
+if is_service_enabled cinder n-vol; then
     TARGETS=$(sudo tgtadm --op show --mode target)
     if [[ -n "$TARGETS" ]]; then
         # FIXME(dtroyer): this could very well require more here to