Merge "Upload XenServer style ovf directly to glance"
diff --git a/README.md b/README.md
index 9310758..483d1b0 100644
--- a/README.md
+++ b/README.md
@@ -60,11 +60,12 @@
 # Database Backend
 
 Multiple database backends are available. The available databases are defined in the lib/databases directory.
-To choose a database backend, add a line to your `localrc` like:
+`mysql` is the default database, choose a different one by putting the following in `localrc`:
 
-    use_database postgresql
+    disable_service mysql
+    enable_service postgresql
 
-By default, the mysql database backend is used.
+`mysql` is the default database.
 
 # RPC Backend
 
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index deb1a03..a92c0d9 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -39,9 +39,8 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
-# run test as the admin user
-_OLD_USERNAME=$OS_USERNAME
-OS_USERNAME=admin
+# Test as the admin user
+. $TOP_DIR/openrc admin admin
 
 
 # Create an aggregate
@@ -54,7 +53,7 @@
 exit_if_aggregate_present() {
     aggregate_name=$1
 
-    if [ `nova aggregate-list | grep -c " $aggregate_name "` == 0 ]; then
+    if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
         echo "SUCCESS $aggregate_name not present"
     else
         echo "ERROR found aggregate: $aggregate_name"
@@ -64,8 +63,8 @@
 
 exit_if_aggregate_present $AGGREGATE_NAME
 
-AGGREGATE_ID=`nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1`
-AGGREGATE2_ID=`nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1`
+AGGREGATE_ID=$(nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE_NAME " | get_field 1)
+AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
 
 # check aggregate created
 nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
@@ -125,7 +124,7 @@
 if [ "$VIRT_DRIVER" == "xenserver" ]; then
     echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
 fi
-FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1`
+FIRST_HOST=$(nova host-list | grep compute | get_field 1 | head -1)
 # Make sure can add two aggregates to same host
 nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
 nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
@@ -142,12 +141,6 @@
 nova aggregate-delete $AGGREGATE2_ID
 exit_if_aggregate_present $AGGREGATE_NAME
 
-
-# Test complete
-# =============
-OS_USERNAME=$_OLD_USERNAME
-echo "AGGREGATE TEST PASSED"
-
 set +o xtrace
 echo "**************************************************"
 echo "End DevStack Exercise: $0"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 5ada237..679091b 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,52 +44,80 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Instance type
+# Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
-# Default floating IP pool name
-DEFAULT_FLOATING_POOL=${DEFAULT_FLOATING_POOL:-nova}
-
-# Default user
-DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros}
+# Boot this image, use first AMI image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
 
 # Security group name
 SECGROUP=${SECGROUP:-boot_secgroup}
 
+# Instance and volume names
+VM_NAME=${VM_NAME:-ex-bfv-inst}
+VOL_NAME=${VOL_NAME:-ex-vol-bfv}
 
-# Launching servers
-# =================
+
+# Launching a server
+# ==================
+
+# List servers for tenant:
+nova list
+
+# Images
+# ------
+
+# List the images available
+glance image-list
 
 # Grab the id of the image to launch
-IMAGE=`glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1`
-die_if_not_set IMAGE "Failure getting image"
+IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
-# Instance and volume names
-VOL_INSTANCE_NAME=${VOL_INSTANCE_NAME:-test_vol_instance}
-VOL_NAME=${VOL_NAME:-test_volume}
+# Security Groups
+# ---------------
 
-# Clean-up from previous runs
-nova delete $VOL_INSTANCE_NAME || true
+# List security groups
+nova secgroup-list
 
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VOL_INSTANCE_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
-    exit 1
+# Create a secgroup
+if ! nova secgroup-list | grep -q $SECGROUP; then
+    nova secgroup-create $SECGROUP "$SECGROUP description"
+    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+        echo "Security group not created"
+        exit 1
+    fi
 fi
 
-# Configure Security Groups
-nova secgroup-delete $SECGROUP || true
-nova secgroup-create $SECGROUP "$SECGROUP description"
-nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
+# Configure Security Group Rules
+if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
+    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+fi
+if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
+    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
+fi
 
-# Determinine instance type
-INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | cut -d"|" -f2`
+# List secgroup rules
+nova secgroup-list-rules $SECGROUP
+
+# Set up instance
+# ---------------
+
+# List flavors
+nova flavor-list
+
+# Select a flavor
+INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
 if [[ -z "$INSTANCE_TYPE" ]]; then
     # grab the first flavor in the list to launch if default doesn't exist
-   INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | cut -d"|" -f2`
+   INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+fi
+
+# Clean-up from previous runs
+nova delete $VM_NAME || true
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
+    echo "server didn't terminate!"
+    exit 1
 fi
 
 # Setup Keypair
@@ -99,78 +127,80 @@
 nova keypair-add $KEY_NAME > $KEY_FILE
 chmod 600 $KEY_FILE
 
-# Delete the old volume
+# Set up volume
+# -------------
+
+# Delete any old volume
 cinder delete $VOL_NAME || true
-
-# Free every floating ips - setting FREE_ALL_FLOATING_IPS=True in localrc will make life easier for testers
-if [ "$FREE_ALL_FLOATING_IPS" = "True" ]; then
-    nova floating-ip-list | grep nova | cut -d "|" -f2 | tr -d " " | xargs -n1 nova floating-ip-delete || true
-fi
-
-# Allocate floating ip
-FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1`
-
-# Make sure the ip gets allocated
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating IP not allocated"
+if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not deleted"
     exit 1
 fi
 
 # Create the bootable volume
-cinder create --display_name=$VOL_NAME --image-id $IMAGE $DEFAULT_VOLUME_SIZE
-
-# Wait for volume to activate
+start_time=$(date +%s)
+cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+    die "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
     echo "Volume $VOL_NAME not created"
     exit 1
 fi
+end_time=$(date +%s)
+echo "Completed cinder create in $((end_time - start_time)) seconds"
 
-VOLUME_ID=`cinder list | grep $VOL_NAME  | get_field 1`
+# Get volume ID
+VOL_ID=$(cinder list | grep $VOL_NAME  | get_field 1)
+die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
-# Boot instance from volume!  This is done with the --block_device_mapping param.
-# The format of mapping is:
+# Boot instance
+# -------------
+
+# Boot using the --block_device_mapping param. The format of mapping is:
 # <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
 # Leaving the middle two fields blank appears to do-the-right-thing
-VOL_VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block_device_mapping vda=$VOLUME_ID:::0 --security_groups=$SECGROUP --key_name $KEY_NAME $VOL_INSTANCE_NAME | grep ' id ' | get_field 2`
-die_if_not_set VOL_VM_UUID "Failure launching $VOL_INSTANCE_NAME"
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
+die_if_not_set VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VOL_VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
     echo "server didn't become active!"
     exit 1
 fi
 
-# Add floating ip to our server
-nova add-floating-ip $VOL_VM_UUID $FLOATING_IP
+# Get the instance IP
+IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
+die_if_not_set IP "Failure retrieving IP address"
 
-# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
+# Private IPs can be pinged in single node deployments
+ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
 
-# Make sure our volume-backed instance launched
-ssh_check "$PUBLIC_NETWORK_NAME" $KEY_FILE $FLOATING_IP $DEFAULT_INSTANCE_USER $ACTIVE_TIMEOUT
-
-# Remove floating ip from volume-backed instance
-nova remove-floating-ip $VOL_VM_UUID $FLOATING_IP
+# Clean up
+# --------
 
 # Delete volume backed instance
-nova delete $VOL_INSTANCE_NAME || \
-    die "Failure deleting instance volume $VOL_INSTANCE_NAME"
-
-# Wait till our volume is no longer in-use
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not created"
+nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    echo "Server $VM_NAME not deleted"
     exit 1
 fi
 
-# Delete the volume
-cinder delete $VOL_NAME || \
-    die "Failure deleting volume $VOLUME_NAME"
+# Wait for volume to be released
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not released"
+    exit 1
+fi
 
-# De-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP || \
-    die "Failure deleting floating IP $FLOATING_IP"
+# Delete volume
+start_time=$(date +%s)
+cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME"
+if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
+    echo "Volume $VOL_NAME not deleted"
+    exit 1
+fi
+end_time=$(date +%s)
+echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
-# Delete a secgroup
+# Delete secgroup
 nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index b3e2ad8..894da74 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -8,6 +8,14 @@
 echo "Begin DevStack Exercise: $0"
 echo "*********************************************************************"
 
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
 
 # Settings
 # ========
@@ -63,7 +71,7 @@
         STATUS_KEYSTONE="Skipped"
     else
         echo -e "\nTest Keystone"
-        if keystone $TENANT_ARG $ARGS catalog --service identity; then
+        if keystone $TENANT_ARG_DASH $ARGS_DASH catalog --service identity; then
             STATUS_KEYSTONE="Succeeded"
         else
             STATUS_KEYSTONE="Failed"
@@ -82,7 +90,7 @@
     else
         # Test OSAPI
         echo -e "\nTest Nova"
-        if nova $TENANT_ARG $ARGS flavor-list; then
+        if nova $TENANT_ARG_DASH $ARGS_DASH flavor-list; then
             STATUS_NOVA="Succeeded"
         else
             STATUS_NOVA="Failed"
@@ -91,6 +99,23 @@
     fi
 fi
 
+# Cinder client
+# -------------
+
+if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then
+        STATUS_CINDER="Skipped"
+    else
+        echo -e "\nTest Cinder"
+        if cinder $TENANT_ARG_DASH $ARGS_DASH list; then
+            STATUS_CINDER="Succeeded"
+        else
+            STATUS_CINDER="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
 # Glance client
 # -------------
 
@@ -116,7 +141,7 @@
         STATUS_SWIFT="Skipped"
     else
         echo -e "\nTest Swift"
-        if swift $TENANT_ARG $ARGS stat; then
+        if swift $TENANT_ARG_DASH $ARGS_DASH stat; then
             STATUS_SWIFT="Succeeded"
         else
             STATUS_SWIFT="Failed"
@@ -125,6 +150,8 @@
     fi
 fi
 
+set +o xtrace
+
 # Results
 # -------
 
@@ -137,6 +164,7 @@
 echo -e "\n"
 report "Keystone" $STATUS_KEYSTONE
 report "Nova" $STATUS_NOVA
+report "Cinder" $STATUS_CINDER
 report "Glance" $STATUS_GLANCE
 report "Swift" $STATUS_SWIFT
 
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index 68c0e5a..c84e84e 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -8,6 +8,14 @@
 echo "Begin DevStack Exercise: $0"
 echo "*********************************************************************"
 
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
 
 # Settings
 # ========
@@ -99,6 +107,23 @@
     fi
 fi
 
+# Cinder client
+# -------------
+
+if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
+    if [[ "$SKIP_EXERCISES" =~ "c-api" ]] ; then
+        STATUS_CINDER="Skipped"
+    else
+        echo -e "\nTest Cinder"
+        if cinder list; then
+            STATUS_CINDER="Succeeded"
+        else
+            STATUS_CINDER="Failed"
+            RETURN=1
+        fi
+    fi
+fi
+
 # Glance client
 # -------------
 
@@ -133,6 +158,8 @@
     fi
 fi
 
+set +o xtrace
+
 # Results
 # -------
 
@@ -146,6 +173,7 @@
 report "Keystone" $STATUS_KEYSTONE
 report "Nova" $STATUS_NOVA
 report "EC2" $STATUS_EC2
+report "Cinder" $STATUS_CINDER
 report "Glance" $STATUS_GLANCE
 report "Swift" $STATUS_SWIFT
 
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 7b35f6f..8b15da8 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -44,7 +44,7 @@
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
-# Boot this image, use first AMI-format image if unset
+# Boot this image, use first AMI image if unset
 DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
 
 # Security group name
@@ -56,6 +56,7 @@
 
 # Find a machine image to boot
 IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
+die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Add a secgroup
 if ! euca-describe-groups | grep -q $SECGROUP; then
@@ -174,7 +175,7 @@
     exit 1
 fi
 
-# Delete group
+# Delete secgroup
 euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 8b18e6f..34ab69d 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -2,8 +2,7 @@
 
 # **floating_ips.sh** - using the cloud can be fun
 
-# we will use the ``nova`` cli tool provided by the ``python-novaclient``
-# package to work out the instance connectivity
+# Test instance connectivity with the ``nova`` command from ``python-novaclient``
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -42,7 +41,7 @@
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
-# Boot this image, use first AMi image if unset
+# Boot this image, use first AMI image if unset
 DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
 
 # Security group name
@@ -54,6 +53,9 @@
 # Additional floating IP pool and range
 TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
 
+# Instance name
+VM_NAME="ex-float"
+
 
 # Launching a server
 # ==================
@@ -64,19 +66,17 @@
 # Images
 # ------
 
-# Nova has a **deprecated** way of listing images.
-nova image-list
-
-# But we recommend using glance directly
+# List the images available
 glance image-list
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
 
-# List of secgroups:
+# List security groups
 nova secgroup-list
 
 # Create a secgroup
@@ -88,81 +88,79 @@
     fi
 fi
 
-# Determinine instance type
-# -------------------------
-
-# List of instance types:
-nova flavor-list
-
-INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1`
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-   INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1`
+# Configure Security Group Rules
+if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
+    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+fi
+if ! nova secgroup-list-rules $SECGROUP | grep -q " tcp .* 22 "; then
+    nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
 fi
 
-NAME="ex-float"
+# List secgroup rules
+nova secgroup-list-rules $SECGROUP
 
-VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2`
-die_if_not_set VM_UUID "Failure launching $NAME"
+# Set up instance
+# ---------------
 
+# List flavors
+nova flavor-list
 
-# Testing
-# =======
+# Select a flavor
+INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
+if [[ -z "$INSTANCE_TYPE" ]]; then
+    # grab the first flavor in the list to launch if default doesn't exist
+   INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+fi
 
-# First check if it spins up (becomes active and responds to ping on
-# internal ip).  If you run this script from a nova node, you should
-# bypass security groups and have direct access to the server.
+# Clean-up from previous runs
+nova delete $VM_NAME || true
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
+    echo "server didn't terminate!"
+    exit 1
+fi
 
-# Waiting for boot
-# ----------------
+# Boot instance
+# -------------
 
-# check that the status is active within ACTIVE_TIMEOUT seconds
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
+die_if_not_set VM_UUID "Failure launching $VM_NAME"
+
+# Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
     echo "server didn't become active!"
     exit 1
 fi
 
-# get the IP of the server
-IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2`
+# Get the instance IP
+IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
 die_if_not_set IP "Failure retrieving IP address"
 
+# Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
 
-# Security Groups & Floating IPs
-# ------------------------------
+# Floating IPs
+# ------------
 
-if ! nova secgroup-list-rules $SECGROUP | grep -q icmp; then
-    # allow icmp traffic (ping)
-    nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list-rules $SECGROUP | grep -q icmp; do sleep 1; done"; then
-        echo "Security group rule not created"
-        exit 1
-    fi
-fi
+# Allocate a floating IP from the default pool
+FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
+die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
 
-# List rules for a secgroup
-nova secgroup-list-rules $SECGROUP
-
-# allocate a floating ip from default pool
-FLOATING_IP=`nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1`
-die_if_not_set FLOATING_IP "Failure creating floating IP"
-
-# list floating addresses
+# List floating addresses
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
     echo "Floating IP not allocated"
     exit 1
 fi
 
-# add floating ip to our server
+# Add floating IP to our server
 nova add-floating-ip $VM_UUID $FLOATING_IP || \
-    die "Failure adding floating IP $FLOATING_IP to $NAME"
+    die "Failure adding floating IP $FLOATING_IP to $VM_NAME"
 
-# test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
 
 if ! is_service_enabled quantum; then
     # Allocate an IP from second floating pool
-    TEST_FLOATING_IP=`nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1`
+    TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
     die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
 
     # list floating addresses
@@ -172,34 +170,40 @@
      fi
 fi
 
-# dis-allow icmp traffic (ping)
-nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || die "Failure deleting security group rule from $SECGROUP"
+# Dis-allow icmp traffic (ping)
+nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
+    die "Failure deleting security group rule from $SECGROUP"
 
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
-    # test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds
+    # Test we aren't able to ping our floating IP within ASSOCIATE_TIMEOUT seconds
     ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail
 fi
 
+# Clean up
+# --------
+
 if ! is_service_enabled quantum; then
     # Delete second floating IP
-    nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP"
+    nova floating-ip-delete $TEST_FLOATING_IP || \
+        die "Failure deleting floating IP $TEST_FLOATING_IP"
 fi
 
-# de-allocate the floating ip
-nova floating-ip-delete $FLOATING_IP || die "Failure deleting floating IP $FLOATING_IP"
+# Delete the floating ip
+nova floating-ip-delete $FLOATING_IP || \
+    die "Failure deleting floating IP $FLOATING_IP"
 
-# Shutdown the server
-nova delete $VM_UUID || die "Failure deleting instance $NAME"
-
+# Delete instance
+nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
 # Wait for termination
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $NAME not deleted"
+    echo "Server $VM_NAME not deleted"
     exit 1
 fi
 
-# Delete a secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+# Delete secgroup
+nova secgroup-delete $SECGROUP || \
+    die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index fbd9c8e..a33c9c6 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -2,7 +2,7 @@
 
 # **sec_groups.sh**
 
-# Test security groups via the command line tools that ship with it.
+# Test security groups via the command line
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -41,7 +41,7 @@
 nova secgroup-list
 
 # Create random name for new sec group and create secgroup of said name
-SEC_GROUP_NAME="sec-group-$(openssl rand -hex 4)"
+SEC_GROUP_NAME="ex-secgroup-$(openssl rand -hex 4)"
 nova secgroup-create $SEC_GROUP_NAME 'a test security group'
 
 # Add some rules to the secgroup
@@ -65,8 +65,10 @@
 for RULE in "${RULES_TO_ADD[@]}"; do
     nova secgroup-delete-rule $SEC_GROUP_NAME tcp $RULE $RULE 0.0.0.0/0
 done
-nova secgroup-delete $SEC_GROUP_NAME
 
+# Delete secgroup
+nova secgroup-delete $SEC_GROUP_NAME || \
+    die "Failure deleting security group $SEC_GROUP_NAME"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index 4cd487b..a75f955 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -2,7 +2,7 @@
 
 # **swift.sh**
 
-# Test swift via the command line tools that ship with it.
+# Test swift via the ``swift`` command line from ``python-swiftclient``
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -33,13 +33,13 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
-# Container name
-CONTAINER=ex-swift
-
 # If swift is not enabled we exit with exitcode 55 which mean
 # exercise is skipped.
 is_service_enabled swift || exit 55
 
+# Container name
+CONTAINER=ex-swift
+
 
 # Testing Swift
 # =============
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 45b8645..45cb0c8 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -2,7 +2,7 @@
 
 # **volumes.sh**
 
-# Test cinder volumes with the cinder command from python-cinderclient
+# Test cinder volumes with the ``cinder`` command from ``python-cinderclient``
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -45,12 +45,16 @@
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
-# Boot this image, use first AMi image if unset
+# Boot this image, use first AMI image if unset
 DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
 
 # Security group name
 SECGROUP=${SECGROUP:-vol_secgroup}
 
+# Instance and volume names
+VM_NAME=${VM_NAME:-ex-vol-inst}
+VOL_NAME="ex-vol-$(openssl rand -hex 4)"
+
 
 # Launching a server
 # ==================
@@ -61,19 +65,17 @@
 # Images
 # ------
 
-# Nova has a **deprecated** way of listing images.
-nova image-list
-
-# But we recommend using glance directly
+# List the images available
 glance image-list
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
+die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
 
-# List of secgroups:
+# List security groups
 nova secgroup-list
 
 # Create a secgroup
@@ -93,126 +95,122 @@
     nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
 fi
 
-# determinine instance type
-# -------------------------
+# List secgroup rules
+nova secgroup-list-rules $SECGROUP
 
-# List of instance types:
+# Set up instance
+# ---------------
+
+# List flavors
 nova flavor-list
 
-INSTANCE_TYPE=`nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1`
+# Select a flavor
+INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
 if [[ -z "$INSTANCE_TYPE" ]]; then
     # grab the first flavor in the list to launch if default doesn't exist
-   INSTANCE_TYPE=`nova flavor-list | head -n 4 | tail -n 1 | get_field 1`
+   INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
 fi
 
-NAME="ex-vol"
+# Clean-up from previous runs
+nova delete $VM_NAME || true
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
+    echo "server didn't terminate!"
+    exit 1
+fi
 
-VM_UUID=`nova boot --flavor $INSTANCE_TYPE --image $IMAGE $NAME --security_groups=$SECGROUP | grep ' id ' | get_field 2`
-die_if_not_set VM_UUID "Failure launching $NAME"
+# Boot instance
+# -------------
 
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
+die_if_not_set VM_UUID "Failure launching $VM_NAME"
 
-# Testing
-# =======
-
-# First check if it spins up (becomes active and responds to ping on
-# internal ip).  If you run this script from a nova node, you should
-# bypass security groups and have direct access to the server.
-
-# Waiting for boot
-# ----------------
-
-# check that the status is active within ACTIVE_TIMEOUT seconds
+# Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
     echo "server didn't become active!"
     exit 1
 fi
 
-# get the IP of the server
-IP=`nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2`
+# Get the instance IP
+IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
 die_if_not_set IP "Failure retrieving IP address"
 
-# for single node deployments, we can ping private ips
+# Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
 
 # Volumes
 # -------
 
-VOL_NAME="myvol-$(openssl rand -hex 4)"
-
 # Verify it doesn't exist
-if [[ -n "`cinder list | grep $VOL_NAME | head -1 | get_field 2`" ]]; then
+if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
     echo "Volume $VOL_NAME already exists"
     exit 1
 fi
 
 # Create a new volume
-cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE
-if [[ $? != 0 ]]; then
-    echo "Failure creating volume $VOL_NAME"
-    exit 1
-fi
-
-start_time=`date +%s`
+start_time=$(date +%s)
+cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
+    die "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
     echo "Volume $VOL_NAME not created"
     exit 1
 fi
-end_time=`date +%s`
+end_time=$(date +%s)
 echo "Completed cinder create in $((end_time - start_time)) seconds"
 
 # Get volume ID
-VOL_ID=`cinder list | grep $VOL_NAME | head -1 | get_field 1`
+VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
 die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Attach to server
 DEVICE=/dev/vdb
-start_time=`date +%s`
+start_time=$(date +%s)
 nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
-    die "Failure attaching volume $VOL_NAME to $NAME"
+    die "Failure attaching volume $VOL_NAME to $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not attached to $NAME"
+    echo "Volume $VOL_NAME not attached to $VM_NAME"
     exit 1
 fi
-end_time=`date +%s`
+end_time=$(date +%s)
 echo "Completed volume-attach in $((end_time - start_time)) seconds"
 
-VOL_ATTACH=`cinder list | grep $VOL_NAME | head -1 | get_field -1`
+VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
 die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
 if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
     echo "Volume not attached to correct instance"
     exit 1
 fi
 
+# Clean up
+# --------
+
 # Detach volume
-start_time=`date +%s`
-nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $NAME"
+start_time=$(date +%s)
+nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not detached from $NAME"
+    echo "Volume $VOL_NAME not detached from $VM_NAME"
     exit 1
 fi
-end_time=`date +%s`
+end_time=$(date +%s)
 echo "Completed volume-detach in $((end_time - start_time)) seconds"
 
 # Delete volume
-start_time=`date +%s`
+start_time=$(date +%s)
 cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
     echo "Volume $VOL_NAME not deleted"
     exit 1
 fi
-end_time=`date +%s`
+end_time=$(date +%s)
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
-# Shutdown the server
-nova delete $VM_UUID || die "Failure deleting instance $NAME"
-
-# Wait for termination
+# Delete instance
+nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $NAME not deleted"
+    echo "Server $VM_NAME not deleted"
     exit 1
 fi
 
-# Delete a secgroup
+# Delete secgroup
 nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
diff --git a/files/apts/sysstat b/files/apts/sysstat
new file mode 100644
index 0000000..ea0c342
--- /dev/null
+++ b/files/apts/sysstat
@@ -0,0 +1 @@
+sysstat
diff --git a/files/rpms-suse/sysstat b/files/rpms-suse/sysstat
new file mode 100644
index 0000000..ea0c342
--- /dev/null
+++ b/files/rpms-suse/sysstat
@@ -0,0 +1 @@
+sysstat
diff --git a/files/rpms/sysstat b/files/rpms/sysstat
new file mode 100644
index 0000000..ea0c342
--- /dev/null
+++ b/files/rpms/sysstat
@@ -0,0 +1 @@
+sysstat
diff --git a/functions b/functions
index 22ba168..b94c611 100644
--- a/functions
+++ b/functions
@@ -1012,9 +1012,11 @@
 #  $1 The name of the database backend to use (mysql, postgresql, ...)
 function use_database {
     if [[ -z "$DATABASE_BACKENDS" ]]; then
-        # The backends haven't initialized yet, just save the selection for now
+        # No backends registered means this is likely called from ``localrc``
+        # This is now deprecated usage
         DATABASE_TYPE=$1
     else
+        # This should no longer get called...here for posterity
         use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
     fi
 }
diff --git a/lib/cinder b/lib/cinder
index 4d1ab42..49db410 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -171,7 +171,7 @@
     database_connection_url dburl cinder
     iniset $CINDER_CONF DEFAULT sql_connection $dburl
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
-    iniset $CINDER_CONF DEFAULT root_helper "sudo ${CINDER_ROOTWRAP}"
+    iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
     iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
 
diff --git a/lib/database b/lib/database
index 07e37ae..4fba7c2 100644
--- a/lib/database
+++ b/lib/database
@@ -2,9 +2,12 @@
 # Interface for interacting with different database backends
 
 # Dependencies:
-# DATABASE_BACKENDS variable must contain a list of available database backends
-# DATABASE_TYPE variable must be set
+# ``ENABLED_SERVICES`` must be defined
 
+# ``DATABASE_BACKENDS`` will contain a list of available database backends
+# after sourcing this file.
+
+# This is a wrapper for the specific database backends available.
 # Each database must implement four functions:
 #   recreate_database_$DATABASE_TYPE
 #   install_database_$DATABASE_TYPE
@@ -23,8 +26,36 @@
     [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1"
 }
 
+# Sourcing the database libs sets DATABASE_BACKENDS with the available list
 for f in $TOP_DIR/lib/databases/*; do source $f; done
 
+# If ``DATABASE_TYPE`` is defined here it's because the user has it in ``localrc``
+# or has called ``use_database``.  Both are deprecated so let's fix it up for now.
+if [[ -n $DATABASE_TYPE ]]; then
+    # This is now deprecated usage, set up a warning and try to be
+    # somewhat backward compatible for now.
+    DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; DATABASE_TYPE or use_database in localrc is deprecated\n"
+    if [[ ! $ENABLED_SERVICES =~ $DATABASE_TYPE ]]; then
+        # It's not in enabled services but user has attempted to select a
+        # database, so just add it now
+        ENABLED_SERVICES+=,$DATABASE_TYPE
+        unset DATABASE_TYPE
+    fi
+fi
+
+# ``DATABASE_BACKENDS`` now contains a list of the supported databases
+# Look in ``ENABLED_SERVICES`` to see if one has been selected
+for db in $DATABASE_BACKENDS; do
+    # Set the type for the rest of the backend to use
+    if is_service_enabled $db; then
+        # Set this now for the rest of the database functions
+        DATABASE_TYPE=$db
+    fi
+done
+# If ``DATABASE_TYPE`` is unset here no database was selected
+# This is not an error as multi-node installs will do this on the compute nodes
+
+
 # Set the database type based on the configuration
 function initialize_database_backends {
     for backend in $DATABASE_BACKENDS; do
diff --git a/lib/quantum b/lib/quantum
index f3a3ec4..61a5218 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -297,7 +297,7 @@
         quantum router-interface-add $ROUTER_ID $SUBNET_ID
         # Create an external network, and a subnet. Configure the external network as router gw
         EXT_NET_ID=$(quantum net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2)
-        EXT_GW_IP=$(quantum subnet-create --ip_version 4 $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
+        EXT_GW_IP=$(quantum subnet-create --ip_version 4 ${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} $EXT_NET_ID $FLOATING_RANGE -- --enable_dhcp=False | grep 'gateway_ip' | get_field 2)
         quantum router-gateway-set $ROUTER_ID $EXT_NET_ID
 
         if is_quantum_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
diff --git a/lib/tempest b/lib/tempest
index e43f6d7..d17b32d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -48,7 +48,7 @@
 BUILD_TIMEOUT=400
 
 
-BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.0"
+BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1"
 
 # Entry Points
 # ------------
@@ -212,8 +212,6 @@
         TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
     fi
     iniset $TEMPEST_CONF compute allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
-    #Skip until #1074039 is fixed
-    iniset $TEMPEST_CONF compute run_ssh False
     iniset $TEMPEST_CONF compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONF compute network_for_ssh $PRIVATE_NETWORK_NAME
     iniset $TEMPEST_CONF compute ip_version_for_ssh 4
@@ -273,8 +271,8 @@
 
 # init_tempest() - Initialize ec2 images
 function init_tempest() {
-    local base_image_name=cirros-0.3.0-x86_64
-    # /opt/stack/devstack/files/images/cirros-0.3.0-x86_64-uec
+    local base_image_name=cirros-0.3.1-x86_64
+    # /opt/stack/devstack/files/images/cirros-0.3.1-x86_64-uec
     local image_dir="$FILES/images/${base_image_name}-uec"
     local kernel="$image_dir/${base_image_name}-vmlinuz"
     local ramdisk="$image_dir/${base_image_name}-initrd"
diff --git a/stack.sh b/stack.sh
index 331743f..7d43278 100755
--- a/stack.sh
+++ b/stack.sh
@@ -83,11 +83,6 @@
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
 
-# Validate database selection
-# Since DATABASE_BACKENDS is now set, this also gets ENABLED_SERVICES
-# properly configured for the database selection.
-use_database $DATABASE_TYPE || echo "Invalid database '$DATABASE_TYPE'"
-
 # Remove services which were negated in ENABLED_SERVICES
 # using the "-" prefix (e.g., "-rabbit") instead of
 # calling disable_service().
@@ -268,6 +263,11 @@
 SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
 SYSLOG_PORT=${SYSLOG_PORT:-516}
 
+# Enable sysstat logging
+SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
+SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
+
+
 # Use color for logging output (only available if syslog is not used)
 LOG_COLOR=`trueorfalse True $LOG_COLOR`
 
@@ -414,13 +414,13 @@
 # Database Configuration
 # ----------------------
 
-# To select between database backends, add a line to localrc like:
+# To select between database backends, add the following to ``localrc``:
 #
-#  use_database postgresql
+#    disable_service mysql
+#    enable_service postgresql
 #
-# The available database backends are defined in the ``DATABASE_BACKENDS``
-# variable defined in stackrc. By default, MySQL is enabled as the database
-# backend.
+# The available database backends are listed in ``DATABASE_BACKENDS`` after
+# ``lib/database`` is sourced. ``mysql`` is the default.
 
 initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
 
@@ -504,11 +504,11 @@
         if [ ! -z "$LAST_SPINNER_PID" ]; then
             printf "\b\b\bdone\n" >&3
         fi
-        echo -n $@ >&6
+        echo -n -e $@ >&6
         spinner &
         LAST_SPINNER_PID=$!
     else
-        echo $@ >&6
+        echo -e $@ >&6
     fi
 }
 
@@ -1084,7 +1084,7 @@
         # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
         for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
            # Attempt to convert flags to options
-           iniset $NOVA_CONF baremetal ${I//=/ }
+           iniset $NOVA_CONF baremetal ${I/=/ }
         done
 
     # Default
@@ -1215,8 +1215,9 @@
 # Upload an image to glance.
 #
 # The default image is cirros, a small testing image which lets you login as **root**
-# cirros also uses ``cloud-init``, supporting login via keypair and sending scripts as
-# userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
+# cirros has a ``cloud-init`` analog supporting login via keypair and sending
+# scripts as userdata.
+# See https://help.ubuntu.com/community/CloudInit for more on cloud-init
 #
 # Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
 #  * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
@@ -1274,6 +1275,15 @@
     screen_it baremetal "nova-baremetal-deploy-helper"
 fi
 
+# run sysstat if it is enabled
+if is_service_enabled sysstat; then
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        screen_it sysstat "sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
+    else
+        screen_it sysstat "sar $SYSSTAT_INTERVAL"
+    fi
+fi
+
 # Save some values we generated for later use
 CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
 echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
@@ -1349,9 +1359,9 @@
 # Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address
 echo "This is your host ip: $HOST_IP"
 
-# Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS``
-if [[ -n "$EXTRA_FLAGS" ]]; then
-    echo_summary "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS"
+# Warn that a deprecated feature was used
+if [[ -n "$DEPRECATED_TEXT" ]]; then
+    echo_summary "WARNING: $DEPRECATED_TEXT"
 fi
 
 # Indicate how long this took to run (bash maintained variable ``SECONDS``)
diff --git a/stackrc b/stackrc
index 91f4e2b..008bc9c 100644
--- a/stackrc
+++ b/stackrc
@@ -9,9 +9,6 @@
 # Destination for working data
 DATA_DIR=${DEST}/data
 
-# Select the default database
-DATABASE_TYPE=mysql
-
 # Determine stack user
 if [[ $EUID -eq 0 ]]; then
     STACK_USER=stack
@@ -24,7 +21,7 @@
 # ``disable_service`` functions in ``localrc``.
 # For example, to enable Swift add this to ``localrc``:
 # enable_service swift
-ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,$DATABASE_TYPE
+ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,rabbit,tempest,mysql
 
 # Set the default Nova APIs to enable
 NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
@@ -167,12 +164,12 @@
 #    glance as a disk image.  If it ends in .gz, it is uncompressed first.
 #    example:
 #      http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
-#      http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
+#      http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz
 #  * OpenVZ image:
 #    OpenVZ uses its own format of image, and does not support UEC style images
 
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
-#IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
+#IMAGE_URLS="http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img" # cirros full disk image
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
 # which may be set in ``localrc``.  Also allow ``DEFAULT_IMAGE_NAME`` and
@@ -184,16 +181,16 @@
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
-                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-rootfs}
-                IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz"};;
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-rootfs}
+                IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-rootfs.img.gz"};;
             *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
-                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
-                IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
+                DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
+                IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
         esac
         ;;
     *) # otherwise, use the uec style image (with kernel, ramdisk, disk)
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-uec}
-        IMAGE_URLS=${IMAGE_URLS:-"http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz"};;
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
+        IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
 esac
 
 # 5Gb default volume backing file size
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 55cb8fa..619d63f 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -1,6 +1,10 @@
 #!/usr/bin/env bash
 
-#Warning: This script just for development purposes
+# **create_userrc.sh**
+
+# Pre-create rc files and credentials for the default users.
+
+# Warning: This script is just for development purposes
 
 ACCOUNT_DIR=./accrc
 
@@ -164,12 +168,12 @@
     local ec2_cert="$rcfile-cert.pem"
     local ec2_private_key="$rcfile-pk.pem"
     # Try to preserve the original file on fail (best effort)
-    mv "$ec2_private_key" "$ec2_private_key.old" &>/dev/null
-    mv "$ec2_cert" "$ec2_cert.old" &>/dev/null
+    mv -f "$ec2_private_key" "$ec2_private_key.old" &>/dev/null
+    mv -f "$ec2_cert" "$ec2_cert.old" &>/dev/null
     # It will not create certs when the password is incorrect
     if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then
-        mv "$ec2_private_key.old" "$ec2_private_key" &>/dev/null
-        mv "$ec2_cert.old" "$ec2_cert" &>/dev/null
+        mv -f "$ec2_private_key.old" "$ec2_private_key" &>/dev/null
+        mv -f "$ec2_cert.old" "$ec2_cert" &>/dev/null
     fi
     cat >"$rcfile" <<EOF
 # you can source this file