Merge "Stop adding the stack user to the sudo or wheel group"
diff --git a/.gitignore b/.gitignore
index c8d2560..83c5419 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 proto
 *~
+.*.sw[nop]
 *.log
 src
 localrc
diff --git a/AUTHORS b/AUTHORS
index ff05f82..b5f972f 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -5,6 +5,7 @@
 Armando Migliaccio <armando.migliaccio@eu.citrix.com>
 Brad Hall <brad@nicira.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
+Dan Prince <dprince@redhat.com>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Eddie Hebert <edhebert@gmail.com>
diff --git a/exerciserc b/exerciserc
index b41714d..82c74b7 100644
--- a/exerciserc
+++ b/exerciserc
@@ -20,3 +20,9 @@
 
 # Max time to wait for a vm to terminate
 export TERMINATE_TIMEOUT=${TERMINATE_TIMEOUT:-30}
+
+# Max time to wait for a euca-volume command to propagate
+export VOLUME_TIMEOUT=${VOLUME_TIMEOUT:-30}
+
+# Max time to wait for a euca-delete command to propagate
+export VOLUME_DELETE_TIMEOUT=${VOLUME_DELETE_TIMEOUT:-60}
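
A sketch of how an exercise script might consume these timeouts, reusing the
``timeout ... sh -c`` polling idiom that appears elsewhere in DevStack
(``$VOLUME`` is an illustrative volume id, not part of this change):

    if ! timeout $VOLUME_TIMEOUT sh -c \
        "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
        echo "Volume $VOLUME did not become available within $VOLUME_TIMEOUT seconds"
        exit 1
    fi
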
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 76e5202..4a538c6 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -24,6 +24,9 @@
 # Keep track of the current directory
 EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
 TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+VOLUME_ZONE=cinder
+VOLUME_SIZE=1
+ATTACH_DEVICE=/dev/vdc
 
 # Import common functions
 source $TOP_DIR/functions
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 6749558..0f25355 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -33,6 +33,10 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# If neither cinder nor n-vol is enabled, exit with code 55, which signals
+# that the exercise has been skipped.
+is_service_enabled cinder n-vol || exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
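
A minimal sketch of how a caller could distinguish the new skip code from a
real failure (how DevStack's own exercise runner reports this may differ):

    ./exercises/volumes.sh
    rc=$?
    if [[ $rc -eq 55 ]]; then
        echo "volumes.sh SKIPPED (neither cinder nor n-vol is enabled)"
    elif [[ $rc -ne 0 ]]; then
        echo "volumes.sh FAILED with exit code $rc"
    fi
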
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index e54f16c..fb98471 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -1,6 +1,7 @@
 <VirtualHost *:80>
     WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi
     WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR%
+    WSGIApplicationGroup %{GLOBAL}
 
     SetEnv APACHE_RUN_USER %USER%
     SetEnv APACHE_RUN_GROUP %GROUP%
diff --git a/files/apts/cinder b/files/apts/cinder
new file mode 100644
index 0000000..5db06ea
--- /dev/null
+++ b/files/apts/cinder
@@ -0,0 +1,2 @@
+tgt
+lvm2
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 2cdc2fa..7886072 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -9,6 +9,7 @@
 # service              nova      admin, [ResellerAdmin (swift only)]
 # service              quantum   admin        # if enabled
 # service              swift     admin        # if enabled
+# service              cinder    admin        # if enabled
 # demo                 admin     admin
 # demo                 demo      Member, anotherrole
 # invisible_to_admin   demo      Member
@@ -19,8 +20,13 @@
 # SERVICE_TOKEN - aka admin_token in keystone.conf
 # SERVICE_ENDPOINT - local Keystone admin endpoint
 # SERVICE_TENANT_NAME - name of tenant containing service accounts
+# SERVICE_HOST - host used for endpoint creation
 # ENABLED_SERVICES - stack.sh's list of services to start
 # DEVSTACK_DIR - Top-level DevStack directory
+# KEYSTONE_CATALOG_BACKEND - used to determine service catalog creation
+
+# Defaults
+# --------
 
 ADMIN_PASSWORD=${ADMIN_PASSWORD:-secrete}
 SERVICE_PASSWORD=${SERVICE_PASSWORD:-$ADMIN_PASSWORD}
@@ -29,10 +35,13 @@
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
 function get_id () {
-    echo `$@ | awk '/ id / { print $4 }'`
+    echo `"$@" | awk '/ id / { print $4 }'`
 }
 
+
 # Tenants
+# -------
+
 ADMIN_TENANT=$(get_id keystone tenant-create --name=admin)
 SERVICE_TENANT=$(get_id keystone tenant-create --name=$SERVICE_TENANT_NAME)
 DEMO_TENANT=$(get_id keystone tenant-create --name=demo)
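
The quoting change in get_id() matters for arguments containing spaces; a
sketch of the difference:

    # With unquoted $@, an argument such as
    #   --description="Keystone Identity Service"
    # is re-split into three words before reaching the keystone client.
    # With "$@" each original argument is passed through intact:
    SVC=$(get_id keystone service-create --name=keystone \
        --description="Keystone Identity Service")
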
@@ -40,6 +49,8 @@
 
 
 # Users
+# -----
+
 ADMIN_USER=$(get_id keystone user-create --name=admin \
                                          --pass="$ADMIN_PASSWORD" \
                                          --email=admin@example.com)
@@ -49,6 +60,8 @@
 
 
 # Roles
+# -----
+
 ADMIN_ROLE=$(get_id keystone role-create --name=admin)
 KEYSTONEADMIN_ROLE=$(get_id keystone role-create --name=KeystoneAdmin)
 KEYSTONESERVICE_ROLE=$(get_id keystone role-create --name=KeystoneServiceAdmin)
@@ -73,58 +86,201 @@
 keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT
 
 
-# Configure service users/roles
-NOVA_USER=$(get_id keystone user-create --name=nova \
-                                        --pass="$SERVICE_PASSWORD" \
-                                        --tenant_id $SERVICE_TENANT \
-                                        --email=nova@example.com)
-keystone user-role-add --tenant_id $SERVICE_TENANT \
-                       --user_id $NOVA_USER \
-                       --role_id $ADMIN_ROLE
+# Services
+# --------
 
-GLANCE_USER=$(get_id keystone user-create --name=glance \
-                                          --pass="$SERVICE_PASSWORD" \
-                                          --tenant_id $SERVICE_TENANT \
-                                          --email=glance@example.com)
-keystone user-role-add --tenant_id $SERVICE_TENANT \
-                       --user_id $GLANCE_USER \
-                       --role_id $ADMIN_ROLE
+# Keystone
+if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+	KEYSTONE_SERVICE=$(get_id keystone service-create \
+		--name=keystone \
+		--type=identity \
+		--description="Keystone Identity Service")
+	keystone endpoint-create \
+	    --region RegionOne \
+		--service_id $KEYSTONE_SERVICE \
+		--publicurl "http://$SERVICE_HOST:\$(public_port)s/v2.0" \
+		--adminurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0" \
+		--internalurl "http://$SERVICE_HOST:\$(admin_port)s/v2.0"
+fi
 
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
-    SWIFT_USER=$(get_id keystone user-create --name=swift \
-                                             --pass="$SERVICE_PASSWORD" \
-                                             --tenant_id $SERVICE_TENANT \
-                                             --email=swift@example.com)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $SWIFT_USER \
-                           --role_id $ADMIN_ROLE
+# Nova
+if [[ "$ENABLED_SERVICES" =~ "n-cpu" ]]; then
+    NOVA_USER=$(get_id keystone user-create \
+        --name=nova \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=nova@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $NOVA_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        NOVA_SERVICE=$(get_id keystone service-create \
+            --name=nova \
+            --type=compute \
+            --description="Nova Compute Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $NOVA_SERVICE \
+            --publicurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s" \
+            --internalurl "http://$SERVICE_HOST:\$(compute_port)s/v1.1/\$(tenant_id)s"
+    fi
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api. The admin role in swift allows a user
     # to act as an admin for their tenant, but ResellerAdmin is needed
     # for a user to act as any tenant. The name of this role is also
     # configurable in swift-proxy.conf
     RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $NOVA_USER \
-                           --role_id $RESELLER_ROLE
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $NOVA_USER \
+        --role_id $RESELLER_ROLE
 fi
 
-if [[ "$ENABLED_SERVICES" =~ "quantum" ]]; then
-    QUANTUM_USER=$(get_id keystone user-create --name=quantum \
-                                               --pass="$SERVICE_PASSWORD" \
-                                               --tenant_id $SERVICE_TENANT \
-                                               --email=quantum@example.com)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $QUANTUM_USER \
-                           --role_id $ADMIN_ROLE
+# Volume
+if [[ "$ENABLED_SERVICES" =~ "n-vol" ]]; then
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        VOLUME_SERVICE=$(get_id keystone service-create \
+            --name=volume \
+            --type=volume \
+            --description="Volume Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $VOLUME_SERVICE \
+            --publicurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s" \
+            --internalurl "http://$SERVICE_HOST:8776/v1/\$(tenant_id)s"
+    fi
+fi
+
+# Glance
+if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
+    GLANCE_USER=$(get_id keystone user-create \
+        --name=glance \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=glance@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $GLANCE_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        GLANCE_SERVICE=$(get_id keystone service-create \
+            --name=glance \
+            --type=image \
+            --description="Glance Image Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $GLANCE_SERVICE \
+            --publicurl "http://$SERVICE_HOST:9292/v1" \
+            --adminurl "http://$SERVICE_HOST:9292/v1" \
+            --internalurl "http://$SERVICE_HOST:9292/v1"
+    fi
+fi
+
+# Swift
+if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+    SWIFT_USER=$(get_id keystone user-create \
+        --name=swift \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=swift@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $SWIFT_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        SWIFT_SERVICE=$(get_id keystone service-create \
+            --name=swift \
+            --type="object-store" \
+            --description="Swift Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $SWIFT_SERVICE \
+            --publicurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
+            --adminurl "http://$SERVICE_HOST:8080/v1" \
+            --internalurl "http://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
+    fi
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
+    QUANTUM_USER=$(get_id keystone user-create \
+        --name=quantum \
+        --pass="$SERVICE_PASSWORD" \
+        --tenant_id $SERVICE_TENANT \
+        --email=quantum@example.com)
+    keystone user-role-add \
+        --tenant_id $SERVICE_TENANT \
+        --user_id $QUANTUM_USER \
+        --role_id $ADMIN_ROLE
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        QUANTUM_SERVICE=$(get_id keystone service-create \
+            --name=quantum \
+            --type=network \
+            --description="Quantum Service")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $QUANTUM_SERVICE \
+            --publicurl "http://$SERVICE_HOST:9696/" \
+            --adminurl "http://$SERVICE_HOST:9696/" \
+            --internalurl "http://$SERVICE_HOST:9696/"
+    fi
+fi
+
+# EC2
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        EC2_SERVICE=$(get_id keystone service-create \
+            --name=ec2 \
+            --type=ec2 \
+            --description="EC2 Compatibility Layer")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $EC2_SERVICE \
+            --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
+            --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
+            --internalurl "http://$SERVICE_HOST:8773/services/Cloud"
+    fi
+fi
+
+# S3
+if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+        S3_SERVICE=$(get_id keystone service-create \
+            --name=s3 \
+            --type=s3 \
+            --description="S3")
+        keystone endpoint-create \
+            --region RegionOne \
+            --service_id $S3_SERVICE \
+            --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+            --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
+            --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT"
+    fi
 fi
 
 if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then
     # Tempest has some tests that validate various authorization checks
     # between two regular users in separate tenants
-    ALT_DEMO_TENANT=$(get_id keystone tenant-create --name=alt_demo)
-    ALT_DEMO_USER=$(get_id keystone user-create --name=alt_demo \
-                                        --pass="$ADMIN_PASSWORD" \
-                                        --email=alt_demo@example.com)
-    keystone user-role-add --user $ALT_DEMO_USER --role $MEMBER_ROLE --tenant_id $ALT_DEMO_TENANT
+    ALT_DEMO_TENANT=$(get_id keystone tenant-create \
+        --name=alt_demo)
+    ALT_DEMO_USER=$(get_id keystone user-create \
+        --name=alt_demo \
+        --pass="$ADMIN_PASSWORD" \
+        --email=alt_demo@example.com)
+    keystone user-role-add \
+        --tenant_id $ALT_DEMO_TENANT \
+        --user_id $ALT_DEMO_USER \
+        --role_id $MEMBER_ROLE
+fi
+
+if [[ "$ENABLED_SERVICES" =~ "cinder" ]]; then
+    CINDER_USER=$(get_id keystone user-create --name=cinder \
+                                              --pass="$SERVICE_PASSWORD" \
+                                              --tenant_id $SERVICE_TENANT \
+                                              --email=cinder@example.com)
+    keystone user-role-add --tenant_id $SERVICE_TENANT \
+                           --user_id $CINDER_USER \
+                           --role_id $ADMIN_ROLE
 fi
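
A standalone invocation sketch of the updated script; stack.sh normally
supplies these variables (see the stack.sh hunks later in this patch), and
every value below is an example:

    ADMIN_PASSWORD=secrete SERVICE_PASSWORD=secrete SERVICE_TOKEN=tokentoken \
    SERVICE_ENDPOINT=http://192.168.1.10:35357/v2.0 SERVICE_HOST=192.168.1.10 \
    SERVICE_TENANT_NAME=service KEYSTONE_CATALOG_BACKEND=sql \
    S3_SERVICE_PORT=3333 DEVSTACK_DIR=$(pwd) \
    ENABLED_SERVICES=g-api,g-reg,key,n-api,n-cpu,cinder \
        bash files/keystone_data.sh
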
diff --git a/files/rpms/cinder b/files/rpms/cinder
new file mode 100644
index 0000000..df861aa
--- /dev/null
+++ b/files/rpms/cinder
@@ -0,0 +1,2 @@
+lvm2
+scsi-target-utils
diff --git a/files/rpms/glance b/files/rpms/glance
index e38f239..eff6c2c 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,5 +1,6 @@
 libxml2-devel
 python-argparse
+python-devel
 python-eventlet
 python-greenlet
 python-paste-deploy
diff --git a/functions b/functions
index 32427a4..8cf7c74 100644
--- a/functions
+++ b/functions
@@ -73,6 +73,91 @@
 }
 
 
+# get_packages() collects a list of package names of any type from the
+# prerequisite files in ``files/{apts|pips}``.  The list is intended
+# to be passed to a package installer such as apt or pip.
+#
+# Only packages required for the services in ENABLED_SERVICES will be
+# included.  Two bits of metadata are recognized in the prerequisite files:
+# - ``# NOPRIME`` defers installation to be performed later in stack.sh
+# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
+#   of the package to the distros listed.  The distro names are case insensitive.
+#
+# get_packages dir
+function get_packages() {
+    local package_dir=$1
+    local file_to_parse
+    local service
+
+    if [[ -z "$package_dir" ]]; then
+        echo "No package directory supplied"
+        return 1
+    fi
+    if [[ -z "$DISTRO" ]]; then
+        echo "No distro set in DISTRO"
+        return 1
+    fi
+    for service in general ${ENABLED_SERVICES//,/ }; do
+        # Allow individual services to specify dependencies
+        if [[ -e ${package_dir}/${service} ]]; then
+            file_to_parse="${file_to_parse} $service"
+        fi
+        # NOTE(sdague): n-api needs glance for now because that is
+        # where the glance client lives
+        if [[ $service == n-api ]]; then
+            if [[ ! $file_to_parse =~ nova ]]; then
+                file_to_parse="${file_to_parse} nova"
+            fi
+            if [[ ! $file_to_parse =~ glance ]]; then
+                file_to_parse="${file_to_parse} glance"
+            fi
+        elif [[ $service == c-* ]]; then
+            if [[ ! $file_to_parse =~ cinder ]]; then
+                file_to_parse="${file_to_parse} cinder"
+            fi
+        elif [[ $service == n-* ]]; then
+            if [[ ! $file_to_parse =~ nova ]]; then
+                file_to_parse="${file_to_parse} nova"
+            fi
+        elif [[ $service == g-* ]]; then
+            if [[ ! $file_to_parse =~ glance ]]; then
+                file_to_parse="${file_to_parse} glance"
+            fi
+        elif [[ $service == key* ]]; then
+            if [[ ! $file_to_parse =~ keystone ]]; then
+                file_to_parse="${file_to_parse} keystone"
+            fi
+        fi
+    done
+
+    for file in ${file_to_parse}; do
+        local fname=${package_dir}/${file}
+        local OIFS line package distros distro
+        [[ -e $fname ]] || continue
+
+        OIFS=$IFS
+        IFS=$'\n'
+        for line in $(<${fname}); do
+            if [[ $line =~ "NOPRIME" ]]; then
+                continue
+            fi
+
+            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
+                # We are using BASH regexp matching feature.
+                package=${BASH_REMATCH[1]}
+                distros=${BASH_REMATCH[2]}
+                # In bash ${VAR,,} will lowercase VAR
+                [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package
+                continue
+            fi
+
+            echo ${line%#*}
+        done
+        IFS=$OIFS
+    done
+}
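
A sketch of the two recognized metadata forms in a prerequisite file, and a
typical call (the package entries are illustrative):

    # files/apts/nova:
    #   dnsmasq-base
    #   qemu                # dist:oneiric,precise
    #   libvirt-bin         # NOPRIME
    install_package $(get_packages $FILES/apts)
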
+
+
 # Determine OS Vendor, Release and Update
 # Tested with OS/X, Ubuntu, RedHat, CentOS, Fedora
 # Returns results in global variables:
@@ -142,6 +227,8 @@
 # be owned by the installation user, we create the directory and change the
 # ownership to the proper user.
 # Set global RECLONE=yes to simulate a clone when dest-dir exists
+# Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
+# does not exist (default is False, meaning the repo will be cloned).
 # git_clone remote dest-dir branch
 function git_clone {
     [[ "$OFFLINE" = "True" ]] && return
@@ -153,6 +240,7 @@
     if echo $GIT_BRANCH | egrep -q "^refs"; then
         # If our branch name is a gerrit style refs/changes/...
         if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1
             git clone $GIT_REMOTE $GIT_DEST
         fi
         cd $GIT_DEST
@@ -160,6 +248,7 @@
     else
         # do a full clone only if the directory doesn't exist
         if [[ ! -d $GIT_DEST ]]; then
+            [[ "$ERROR_ON_CLONE" = "True" ]] && exit 1
             git clone $GIT_REMOTE $GIT_DEST
             cd $GIT_DEST
             # This checkout syntax works for both branches and tags
@@ -253,6 +342,7 @@
     for service in ${services}; do
         [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
+        [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
         [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
     done
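
With the new c-* rule, asking for the umbrella name matches any individual
cinder service; a sketch:

    ENABLED_SERVICES=g-api,key,c-api,c-sch,c-vol
    is_service_enabled cinder && echo "enabled"    # true via the c- prefix
    is_service_enabled n-vol || echo "disabled"    # n-vol is not in the list
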
diff --git a/lib/cinder b/lib/cinder
new file mode 100644
index 0000000..f0715a4
--- /dev/null
+++ b/lib/cinder
@@ -0,0 +1,154 @@
+# lib/cinder
+# Install and start Cinder volume service
+
+# Dependencies:
+# - functions
+# - KEYSTONE_AUTH_* must be defined
+# - SERVICE_{TENANT_NAME|PASSWORD} must be defined
+
+# stack.sh
+# ---------
+# install_XXX
+# configure_XXX
+# init_XXX
+# start_XXX
+# stop_XXX
+# cleanup_XXX
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# set up default directories
+CINDER_DIR=$DEST/cinder
+CINDERCLIENT_DIR=$DEST/python-cinderclient
+CINDER_CONF_DIR=/etc/cinder
+CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
+
+# Name of the lvm volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+
+# cleanup_cinder() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_cinder() {
+    # This function intentionally left blank
+    :
+}
+
+# configure_cinder() - Set config files, create data dirs, etc
+function configure_cinder() {
+    setup_develop $CINDER_DIR
+    setup_develop $CINDERCLIENT_DIR
+
+    if [[ ! -d $CINDER_CONF_DIR ]]; then
+        sudo mkdir -p $CINDER_CONF_DIR
+    fi
+    sudo chown `whoami` $CINDER_CONF_DIR
+
+    cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR
+
+    CINDER_API_PASTE_INI=$CINDER_CONF_DIR/api-paste.ini
+    cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI
+    iniset $CINDER_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $CINDER_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $CINDER_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $CINDER_API_PASTE_INI filter:authtoken admin_user cinder
+    iniset $CINDER_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+
+    cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF
+    iniset $CINDER_CONF DEFAULT auth_strategy keystone
+    iniset $CINDER_CONF DEFAULT verbose True
+    iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
+    iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
+    iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
+    iniset $CINDER_CONF DEFAULT sql_connection $BASE_SQL_CONN/cinder?charset=utf8
+    iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST
+    iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
+    iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
+}
+
+# init_cinder() - Initialize database and volume group
+function init_cinder() {
+    # Force nova volumes off
+    NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
+
+    if is_service_enabled mysql; then
+        # (re)create cinder database
+        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS cinder;'
+        mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE cinder;'
+
+        # Initialize the cinder database schema
+        $CINDER_DIR/bin/cinder-manage db sync
+    fi
+
+    if is_service_enabled c-vol; then
+        # Configure a default volume group called ``stack-volumes`` for the volume
+        # service if it does not yet exist.  If you don't wish to use a file backed
+        # volume group, create your own volume group called ``stack-volumes`` before
+        # invoking ``stack.sh``.
+        #
+        # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``.
+
+        if ! sudo vgs $VOLUME_GROUP; then
+            VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+            VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
+            # Only create if the file doesn't already exist
+            [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+            DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
+            # Only create if the loopback device doesn't contain $VOLUME_GROUP
+            if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
+        fi
+
+        if sudo vgs $VOLUME_GROUP; then
+            # Remove iscsi targets
+            sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
+            # Clean out existing volumes
+            for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
+                # VOLUME_NAME_PREFIX prefixes the LVs we want
+                if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
+                    sudo lvremove -f $VOLUME_GROUP/$lv
+                fi
+            done
+        fi
+    fi
+}
+
+# install_cinder() - Collect source and prepare
+function install_cinder() {
+    git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
+    git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
+}
+
+# start_cinder() - Start running processes, including screen
+function start_cinder() {
+    if is_service_enabled c-vol; then
+        if [[ "$os_PACKAGE" = "deb" ]]; then
+            # tgt in oneiric doesn't restart properly if tgtd isn't running
+            # do it in two steps
+            sudo stop tgt || true
+            sudo start tgt
+        else
+            # bypass redirection to systemctl during restart
+            sudo /sbin/service --skip-redirect tgtd restart
+        fi
+    fi
+
+    screen_it c-api "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-api --config-file $CINDER_CONF"
+    screen_it c-vol "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-volume --config-file $CINDER_CONF"
+    screen_it c-sch "cd $CINDER_DIR && $CINDER_DIR/bin/cinder-scheduler --config-file $CINDER_CONF"
+}
+
+# stop_cinder() - Stop running processes (non-screen)
+function stop_cinder() {
+    # FIXME(dtroyer): stop only the cinder screen window?
+
+    if is_service_enabled c-vol; then
+        stop_service tgt
+    fi
+}
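
For reference, the call order that the stack.sh changes later in this patch
wire up for these entry points:

    source $TOP_DIR/lib/cinder
    is_service_enabled cinder && install_cinder     # clone cinder and its client
    is_service_enabled cinder && configure_cinder   # write cinder.conf and api-paste.ini
    is_service_enabled cinder && init_cinder        # set up database and volume group
    is_service_enabled cinder && start_cinder       # launch c-api, c-vol, c-sch in screen
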
diff --git a/stack.sh b/stack.sh
index a1f89c1..bb6e1ae 100755
--- a/stack.sh
+++ b/stack.sh
@@ -89,6 +89,16 @@
 # Sanity Check
 # ============
 
+# Remove any services prefixed with a - from ENABLED_SERVICES.  For example,
+# to install all of the default services except nova-volume (n-vol), set
+# this in your localrc:
+# ENABLED_SERVICES+=",-n-vol"
+for service in ${ENABLED_SERVICES//,/ }; do
+    if [[ ${service} == -* ]]; then
+        ENABLED_SERVICES=$(echo ${ENABLED_SERVICES}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g")
+    fi
+done
+
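
An illustrative before/after for the exclusion loop:

    # ENABLED_SERVICES=g-api,key,n-vol,n-api,-n-vol
    # After the loop, both the "-n-vol" marker and the plain "n-vol" entry
    # are stripped, leaving g-api, key and n-api enabled.
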
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
 if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|f16) ]]; then
@@ -112,6 +122,13 @@
     NOVA_ROOTWRAP=/usr/bin/nova-rootwrap
 fi
 
+# ``stack.sh`` keeps function libraries here
+# Make sure ``$TOP_DIR/lib`` directory is present
+if [ ! -d $TOP_DIR/lib ]; then
+    echo "ERROR: missing devstack/lib - did you grab more than just stack.sh?"
+    exit 1
+fi
+
 # stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external
 # files, along with config templates and other useful files.  You can find these
 # in the ``files`` directory (next to this script).  We will reference this
@@ -130,6 +147,12 @@
     exit 1
 fi
 
+# Make sure we only have one volume service enabled.
+if is_service_enabled cinder && is_service_enabled n-vol; then
+    echo "ERROR: n-vol and cinder must not be enabled at the same time"
+    exit 1
+fi
+
 # OpenStack is designed to be run as a regular user (Horizon will fail to run
 # as root, since Apache refuses to serve content as the root user).  If
 # ``stack.sh`` is run as **root**, it automatically creates a **stack** user with
@@ -194,11 +217,35 @@
     sudo rm -f /etc/sudoers.d/stack_sh_nova
 fi
 
+# Create the destination directory and ensure it is writable by the user
+sudo mkdir -p $DEST
+if [ ! -w $DEST ]; then
+    sudo chown `whoami` $DEST
+fi
+
 # Set True to configure ``stack.sh`` to run cleanly without Internet access.
 # ``stack.sh`` must have been previously run with Internet access to install
 # prerequisites and initialize ``$DEST``.
 OFFLINE=`trueorfalse False $OFFLINE`
 
+# Set True to configure ``stack.sh`` to exit with an error code if it is asked
+# to clone any git repositories.  If devstack is used in a testing environment,
+# this may be used to ensure that the correct code is being tested.
+ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
+
+# Destination path for service data
+DATA_DIR=${DATA_DIR:-${DEST}/data}
+sudo mkdir -p $DATA_DIR
+sudo chown `whoami` $DATA_DIR
+
+
+# Projects
+# --------
+
+# Get project function libraries
+source $TOP_DIR/lib/cinder
+
+
 # Set the destination directories for openstack projects
 NOVA_DIR=$DEST/nova
 HORIZON_DIR=$DEST/horizon
@@ -232,7 +279,7 @@
 M_MAC_RANGE=${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
 
 # Name of the lvm volume group to use/create for iscsi volumes
-VOLUME_GROUP=${VOLUME_GROUP:-nova-volumes}
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
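
To back ``stack-volumes`` with a real disk instead of the default loopback
file, create the group before running ``stack.sh`` (the device name below is
an example):

    sudo pvcreate /dev/sdb1
    sudo vgcreate stack-volumes /dev/sdb1
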
@@ -249,7 +296,7 @@
 HOST_IP_IFACE=${HOST_IP_IFACE:-eth0}
 # Use the eth0 IP unless an explicit is set by ``HOST_IP`` environment variable
 if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
-    HOST_IP=`LC_ALL=C /sbin/ifconfig ${HOST_IP_IFACE} | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'`
+    HOST_IP=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/");  print parts[1]}' | head -n1`
     if [ "$HOST_IP" = "" ]; then
         echo "Could not determine host ip address."
         echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted to eth0"
@@ -559,94 +606,12 @@
 # an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
-# create the destination directory and ensure it is writable by the user
-sudo mkdir -p $DEST
-if [ ! -w $DEST ]; then
-    sudo chown `whoami` $DEST
-fi
-
 
 # Install Packages
 # ================
 #
 # Openstack uses a fair number of other projects.
 
-# get_packages() collects a list of package names of any type from the
-# prerequisite files in ``files/{apts|pips}``.  The list is intended
-# to be passed to a package installer such as apt or pip.
-#
-# Only packages required for the services in ENABLED_SERVICES will be
-# included.  Two bits of metadata are recognized in the prerequisite files:
-# - ``# NOPRIME`` defers installation to be performed later in stack.sh
-# - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
-#   of the package to the distros listed.  The distro names are case insensitive.
-#
-# get_packages dir
-function get_packages() {
-    local package_dir=$1
-    local file_to_parse
-    local service
-
-    if [[ -z "$package_dir" ]]; then
-        echo "No package directory supplied"
-        return 1
-    fi
-    for service in general ${ENABLED_SERVICES//,/ }; do
-        # Allow individual services to specify dependencies
-        if [[ -e ${package_dir}/${service} ]]; then
-            file_to_parse="${file_to_parse} $service"
-        fi
-        # NOTE(sdague) n-api needs glance for now because that's where
-        # glance client is
-        if [[ $service == n-api ]]; then
-            if [[ ! $file_to_parse =~ nova ]]; then
-                file_to_parse="${file_to_parse} nova"
-            fi
-            if [[ ! $file_to_parse =~ glance ]]; then
-                file_to_parse="${file_to_parse} glance"
-            fi
-        elif [[ $service == n-* ]]; then
-            if [[ ! $file_to_parse =~ nova ]]; then
-                file_to_parse="${file_to_parse} nova"
-            fi
-        elif [[ $service == g-* ]]; then
-            if [[ ! $file_to_parse =~ glance ]]; then
-                file_to_parse="${file_to_parse} glance"
-            fi
-        elif [[ $service == key* ]]; then
-            if [[ ! $file_to_parse =~ keystone ]]; then
-                file_to_parse="${file_to_parse} keystone"
-            fi
-        fi
-    done
-
-    for file in ${file_to_parse}; do
-        local fname=${package_dir}/${file}
-        local OIFS line package distros distro
-        [[ -e $fname ]] || continue
-
-        OIFS=$IFS
-        IFS=$'\n'
-        for line in $(<${fname}); do
-            if [[ $line =~ "NOPRIME" ]]; then
-                continue
-            fi
-
-            if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
-                # We are using BASH regexp matching feature.
-                package=${BASH_REMATCH[1]}
-                distros=${BASH_REMATCH[2]}
-                # In bash ${VAR,,} will lowecase VAR
-                [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package
-                continue
-            fi
-
-            echo ${line%#*}
-        done
-        IFS=$OIFS
-    done
-}
-
 # install package requirements
 if [[ "$os_PACKAGE" = "deb" ]]; then
     apt_get update
@@ -702,10 +667,12 @@
     # melange
     git_clone $MELANGE_REPO $MELANGE_DIR $MELANGE_BRANCH
 fi
-
 if is_service_enabled melange; then
     git_clone $MELANGECLIENT_REPO $MELANGECLIENT_DIR $MELANGECLIENT_BRANCH
 fi
+if is_service_enabled cinder; then
+    install_cinder
+fi
 
 
 # Initialization
@@ -727,6 +694,10 @@
 if is_service_enabled g-api n-api; then
     setup_develop $GLANCE_DIR
 fi
+
+# Do this _after_ glance is installed to override the old binary
+setup_develop $GLANCECLIENT_DIR
+
 setup_develop $NOVA_DIR
 if is_service_enabled horizon; then
     setup_develop $HORIZON_DIR
@@ -741,9 +712,9 @@
 if is_service_enabled melange; then
     setup_develop $MELANGECLIENT_DIR
 fi
-
-# Do this _after_ glance is installed to override the old binary
-setup_develop $GLANCECLIENT_DIR
+if is_service_enabled cinder; then
+    configure_cinder
+fi
 
 
 # Syslog
@@ -1031,6 +1002,9 @@
 
     GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
     cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
+
+    $GLANCE_DIR/bin/glance-manage db_sync
+
 fi
 
 # Quantum (for controller or agent nodes)
@@ -1076,23 +1050,28 @@
             echo "OVS 1.4+ is required for tunneling between multiple hosts."
             exit 1
         fi
-        sudo sed -i -e "s/.*enable-tunneling = .*$/enable-tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
+        sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
     fi
 fi
 
 # Quantum service (for controller node)
 if is_service_enabled q-svc; then
-    Q_PLUGIN_INI_FILE=/etc/quantum/plugins.ini
     Q_CONF_FILE=/etc/quantum/quantum.conf
-    # must remove this file from existing location, otherwise Quantum will prefer it
-    if [[ -e $QUANTUM_DIR/etc/plugins.ini ]]; then
-        sudo mv $QUANTUM_DIR/etc/plugins.ini $Q_PLUGIN_INI_FILE
-    fi
+    Q_API_PASTE_FILE=/etc/quantum/api-paste.ini
+    Q_POLICY_FILE=/etc/quantum/policy.json
 
     if [[ -e $QUANTUM_DIR/etc/quantum.conf ]]; then
       sudo mv $QUANTUM_DIR/etc/quantum.conf $Q_CONF_FILE
     fi
 
+    if [[ -e $QUANTUM_DIR/etc/api-paste.ini ]]; then
+      sudo mv $QUANTUM_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+    fi
+
+    if [[ -e $QUANTUM_DIR/etc/policy.json ]]; then
+      sudo mv $QUANTUM_DIR/etc/policy.json $Q_POLICY_FILE
+    fi
+
     if is_service_enabled mysql; then
             mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "DROP DATABASE IF EXISTS $Q_DB_NAME;"
             mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e "CREATE DATABASE IF NOT EXISTS $Q_DB_NAME CHARACTER SET utf8;"
@@ -1100,9 +1079,10 @@
             echo "mysql must be enabled in order to use the $Q_PLUGIN Quantum plugin."
             exit 1
     fi
-    sudo sed -i -e "s/^provider =.*$/provider = $Q_PLUGIN_CLASS/g" $Q_PLUGIN_INI_FILE
 
-    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server $Q_CONF_FILE"
+    # Update either configuration file with plugin
+    sudo sed -i -e "s/^core_plugin =.*$/core_plugin = $Q_PLUGIN_CLASS/g" $Q_CONF_FILE
+    screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $Q_CONF_FILE"
 fi
 
 # Quantum agent (for compute nodes)
@@ -1126,7 +1106,7 @@
         sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
         sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
         sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-        sudo sed -i -e "s/.*local-ip = .*/local-ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE
+        sudo sed -i -e "s/.*local_ip = .*/local_ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE
         AGENT_BINARY=$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py
     elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
        # Start up the quantum <-> linuxbridge agent
@@ -1176,9 +1156,32 @@
 
 cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR
 
+# If Nova ships the new rootwrap filter files, deploy them
+# (owned by root) and add a parameter to $NOVA_ROOTWRAP
+ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP"
+if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then
+    # Wipe any existing rootwrap.d files first
+    if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then
+        sudo rm -rf $NOVA_CONF_DIR/rootwrap.d
+    fi
+    # Deploy filters to /etc/nova/rootwrap.d
+    sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d
+    sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d
+    sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d
+    sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/*
+    # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d
+    sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/
+    sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf
+    sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf
+    sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf
+    # Specify rootwrap.conf as first parameter to nova-rootwrap
+    NOVA_ROOTWRAP="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf"
+    ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP *"
+fi
+
 # Set up the rootwrap sudoers
 TEMPFILE=`mktemp`
-echo "$USER ALL=(root) NOPASSWD: $NOVA_ROOTWRAP" >$TEMPFILE
+echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
 chmod 0440 $TEMPFILE
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
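
With the rootwrap.d layout present, the generated sudoers entry looks like
this (the user name and rootwrap path are examples):

    # /etc/sudoers.d/nova-rootwrap
    stack ALL=(root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
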
@@ -1365,6 +1368,9 @@
     # Install memcached for swift.
     install_package memcached
 
+    # Make sure to kill any running Swift processes first
+    pkill -9 -f swift-
+
     # We first do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
@@ -1495,7 +1501,7 @@
 admin_password = ${SERVICE_PASSWORD}
 
 [filter:swift3]
-use = egg:swift3#middleware
+use = egg:swift3#swift3
 EOF
 
     cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
@@ -1609,17 +1615,18 @@
 # Volume Service
 # --------------
 
-if is_service_enabled n-vol; then
-    #
-    # Configure a default volume group called 'nova-volumes' for the nova-volume
+if is_service_enabled cinder; then
+    init_cinder
+elif is_service_enabled n-vol; then
+    # Configure a default volume group called ``stack-volumes`` for the volume
     # service if it does not yet exist.  If you don't wish to use a file backed
-    # volume group, create your own volume group called 'nova-volumes' before
-    # invoking stack.sh.
+    # volume group, create your own volume group called ``stack-volumes`` before
+    # invoking ``stack.sh``.
     #
-    # By default, the backing file is 2G in size, and is stored in /opt/stack.
+    # By default, the backing file is 2G in size, and is stored in ``/opt/stack/data``.
 
     if ! sudo vgs $VOLUME_GROUP; then
-        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DEST/nova-volumes-backing-file}
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
         VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-2052M}
         # Only create if the file doesn't already exist
         [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
@@ -1767,6 +1774,10 @@
     add_nova_opt "logging_context_format_string=%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s"
 fi
 
+# If cinder is enabled, use the cinder volume driver
+if is_service_enabled cinder; then
+    add_nova_opt "volume_api_class=nova.volume.cinder.API"
+fi
 
 # Provide some transition from EXTRA_FLAGS to EXTRA_OPTS
 if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
@@ -1848,7 +1859,7 @@
 
     KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
     KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
-    KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
+    KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-template}
 
     if [[ ! -d $KEYSTONE_CONF_DIR ]]; then
         sudo mkdir -p $KEYSTONE_CONF_DIR
@@ -1859,41 +1870,49 @@
         cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
         cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
     fi
-    cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
 
     # Rewrite stock keystone.conf:
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
     iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
-    # Configure keystone.conf to use templates
-    iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
-    iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
     sed -e "
         /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
     " -i $KEYSTONE_CONF
     # Append the S3 bits
     iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
 
-    # Add swift endpoints to service catalog if swift is enabled
-    if is_service_enabled swift; then
-        echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
-    fi
+    if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
+        # Configure keystone.conf to use sql
+        iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
+        inicomment $KEYSTONE_CONF catalog template_file
+    else
+        KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
+        cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+        # Add swift endpoints to service catalog if swift is enabled
+        if is_service_enabled swift; then
+            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
+        fi
 
-    # Add quantum endpoints to service catalog if quantum is enabled
-    if is_service_enabled quantum; then
-        echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
-        echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
-    fi
+        # Add quantum endpoints to service catalog if quantum is enabled
+        if is_service_enabled quantum; then
+            echo "catalog.RegionOne.network.publicURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.adminURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.internalURL = http://%SERVICE_HOST%:9696/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.network.name = Quantum Service" >> $KEYSTONE_CATALOG
+        fi
 
-    sudo sed -e "
-        s,%SERVICE_HOST%,$SERVICE_HOST,g;
-        s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
-    " -i $KEYSTONE_CATALOG
+        sudo sed -e "
+            s,%SERVICE_HOST%,$SERVICE_HOST,g;
+            s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
+        " -i $KEYSTONE_CATALOG
+
+        # Configure keystone.conf to use templates
+        iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
+        iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
+    fi
 
     # Set up logging
     LOGGING_ROOT="devel"
@@ -1905,25 +1924,37 @@
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
     iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
 
-    # initialize keystone database
+    # Set up the keystone database
     $KEYSTONE_DIR/bin/keystone-manage db_sync
 
     # launch keystone and wait for it to answer before continuing
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q '200 OK'; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while http_proxy= wget -O- $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ 2>&1 | grep -q 'refused'; do sleep 1; done"; then
       echo "keystone did not start"
       exit 1
     fi
 
     # keystone_data.sh creates services, admin and demo users, and roles.
     SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
-    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
+
+    ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
+    SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
+    S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
         bash $FILES/keystone_data.sh
 
+    # Set up auth creds now that keystone is bootstrapped
+    export OS_AUTH_URL=$SERVICE_ENDPOINT
+    export OS_TENANT_NAME=admin
+    export OS_USERNAME=admin
+    export OS_PASSWORD=$ADMIN_PASSWORD
+
     # create an access key and secret key for nova ec2 register image
     if is_service_enabled swift && is_service_enabled nova; then
-        CREDS=$(keystone --os_auth_url=$SERVICE_ENDPOINT --os_username=nova --os_password=$SERVICE_PASSWORD --os_tenant_name=$SERVICE_TENANT_NAME ec2-credentials-create)
+        NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
+        NOVA_TENANT_ID=$(keystone tenant-list | grep " $SERVICE_TENANT_NAME " | get_field 1)
+        CREDS=$(keystone ec2-credentials-create --user_id $NOVA_USER_ID --tenant_id $NOVA_TENANT_ID)
         ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }')
         SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }')
         add_nova_opt "s3_access_key=$ACCESS_KEY"
@@ -1934,6 +1965,7 @@
 
 # launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
+    add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
     screen_it n-api "cd $NOVA_DIR && $NOVA_DIR/bin/nova-api"
     echo "Waiting for nova-api to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://127.0.0.1:8774; do sleep 1; done"; then
@@ -1969,6 +2001,9 @@
 screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ."
 screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
 screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
+if is_service_enabled cinder; then
+    start_cinder
+fi
 screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
 screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
 
@@ -1997,9 +2032,7 @@
     # Create a directory for the downloaded image tarballs.
     mkdir -p $FILES/images
 
-    ADMIN_USER=admin
-    ADMIN_TENANT=admin
-    TOKEN=$(keystone --os_tenant_name $ADMIN_TENANT --os_username $ADMIN_USER --os_password $ADMIN_PASSWORD --os_auth_url http://$HOST_IP:5000/v2.0 token-get | grep ' id ' | get_field 2)
+    TOKEN=$(keystone token-get | grep ' id ' | get_field 2)
 
     # Option to upload legacy ami-tty, which works with xenserver
     if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
diff --git a/stackrc b/stackrc
index 3387d43..3a19cdb 100644
--- a/stackrc
+++ b/stackrc
@@ -6,9 +6,25 @@
 # by default you can append them in your ENABLED_SERVICES variable in
 # your localrc. For example for swift you can just add this in your
 # localrc to add it with the other services:
-# ENABLED_SERVICES="$ENABLED_SERVICES,swift"
+# ENABLED_SERVICES+=,swift
+#
+# To explicitly remove a service, prefix it with a - in ENABLED_SERVICES.
+# For example, to install all of the defaults except nova-volume (n-vol),
+# set this in your localrc:
+# ENABLED_SERVICES+=,-n-vol
 ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit
 
+# Set the default Nova APIs to enable
+NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata
+
+# volume service
+CINDER_REPO=https://github.com/openstack/cinder
+CINDER_BRANCH=master
+
+# volume client
+CINDERCLIENT_REPO=https://github.com/openstack/python-cinderclient
+CINDERCLIENT_BRANCH=master
+
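
Like the other repos in this file, these can be overridden from localrc; a
sketch pointing at a fork (URL and branch are examples):

    CINDER_REPO=https://github.com/example/cinder.git
    CINDER_BRANCH=feature-branch
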
 # compute service
 NOVA_REPO=https://github.com/openstack/nova.git
 NOVA_BRANCH=master
diff --git a/tests/functions.sh b/tests/functions.sh
index e7fbe0c..e436ed9 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -141,3 +141,5 @@
 else
     echo "inicomment failed: $VAL"
 fi
+
+rm test.ini
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 7f2f3e6..241296b 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -19,12 +19,7 @@
 
 set -eux
 
-if [ -a /etc/xensource-inventory]
-then
-    . /etc/xensource-inventory
-else
-    . /etc/xcp/inventory
-fi
+[[ -f "/etc/xensource-inventory" ]] && source "/etc/xensource-inventory" || source "/etc/xcp/inventory"
 
 NAME="XenServer OpenStack VPX"
 DATA_VDI_SIZE="500MiB"
diff --git a/unstack.sh b/unstack.sh
index 341270d..879f842 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -44,7 +44,7 @@
 fi
 
 # Get the iSCSI volumes
-if is_service_enabled n-vol; then
+if is_service_enabled cinder n-vol; then
     TARGETS=$(sudo tgtadm --op show --mode target)
     if [[ -n "$TARGETS" ]]; then
         # FIXME(dtroyer): this could very well require more here to