Merge "Add run_process() to start services without screen"
diff --git a/.gitignore b/.gitignore
index f9e2644..798b081 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@
 *.pem
 accrc
 .stackenv
+.prereqs
diff --git a/AUTHORS b/AUTHORS
index 35c0a52..718a760 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -13,6 +13,7 @@
 Devin Carlen <devin.carlen@gmail.com>
 Doug hellmann <doug.hellmann@dreamhost.com>
 Eddie Hebert <edhebert@gmail.com>
+Edgar Magana <emagana@gmail.com>
 Eoghan Glynn <eglynn@redhat.com>
 Eric Windisch <ewindisch@cloudscaling.com>
 Gabriel Hurley <gabriel@strikeawe.com>
diff --git a/README.md b/README.md
index 483d1b0..a738554 100644
--- a/README.md
+++ b/README.md
@@ -85,19 +85,21 @@
 
 # Swift
 
-Swift is not installed by default, you can enable easily by adding this to your `localrc`:
+Swift is enabled by default and configured with only one replica to avoid being IO/memory intensive on a small VM. When running with only one replica the account, container and object services will run directly in screen. The other services, such as the replicator, updaters and auditor, run in the background.
 
-    enable_service swift
+If you would like to disable Swift you can add this to your `localrc`:
+
+    disable_service s-proxy s-object s-container s-account
 
 If you want a minimal Swift install with only Swift and Keystone you can have this instead in your `localrc`:
 
     disable_all_services
-    enable_service key mysql swift
+    enable_service key mysql s-proxy s-object s-container s-account
 
-If you use Swift with Keystone, Swift will authenticate against it. You will need to make sure to use the Keystone URL to auth against.
+If you want to test a more realistic Swift cluster with multiple replicas, you can do so by customizing the variable `SWIFT_REPLICAS` in your `localrc` (usually to 3).
+
+# Swift S3
 
 If you are enabling `swift3` in `ENABLED_SERVICES` devstack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the `nova-objectstore`.
 
 Only Swift proxy server is launched in the screen session all other services are started in background and managed by `swift-init` tool.
-
-By default Swift will configure 3 replicas (and one spare) which could be IO intensive on a small vm, if you only want to do some quick testing of the API you can choose to only have one replica by customizing the variable `SWIFT_REPLICAS` in your `localrc`.
diff --git a/clean.sh b/clean.sh
new file mode 100755
index 0000000..cf24f27
--- /dev/null
+++ b/clean.sh
@@ -0,0 +1,91 @@
+#!/usr/bin/env bash
+
+# **clean.sh**
+
+# ``clean.sh`` does its best to eradicate traces of a DevStack
+# run except for the following:
+# - both base and target code repos are left alone
+# - packages (system and pip) are left alone
+
+# This means that all data files are removed.  TODO: decide what else, if anything, should also be cleaned up.
+
+# Keep track of the current devstack directory.
+TOP_DIR=$(cd $(dirname "$0") && pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Load local configuration
+source $TOP_DIR/stackrc
+
+# Get the variables that are set in stack.sh
+source $TOP_DIR/.stackenv
+
+# Determine what system we are running on.  This provides ``os_VENDOR``,
+# ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
+# and ``DISTRO``
+GetDistro
+
+
+# Import database library
+source $TOP_DIR/lib/database
+source $TOP_DIR/lib/rpc_backend
+
+source $TOP_DIR/lib/tls
+source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/keystone
+source $TOP_DIR/lib/glance
+source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
+source $TOP_DIR/lib/quantum
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/ldap
+
+
+# See if there is anything running...
+# need to adapt when run_service is merged
+SESSION=$(screen -ls | awk '/[0-9].stack/ { print $1 }')
+if [[ -n "$SESSION" ]]; then
+    # Let unstack.sh do its thing first
+    $TOP_DIR/unstack.sh --all
+fi
+
+# Clean projects
+cleanup_cinder
+cleanup_glance
+cleanup_keystone
+cleanup_nova
+cleanup_quantum
+cleanup_swift
+
+# cinder doesn't clean up the volume group as it might be used elsewhere...
+# clean it up if it is a loop device
+VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}')
+if [[ -n "$VG_DEV" ]]; then
+    sudo losetup -d $VG_DEV
+fi
+
+#if mount | grep $DATA_DIR/swift/drives; then
+#  sudo umount $DATA_DIR/swift/drives/sdb1
+#fi
+
+
+# Clean out /etc
+sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift
+
+# Clean out tgt
+sudo rm /etc/tgt/conf.d/*
+
+# Clean up the message queue
+cleanup_rpc_backend
+cleanup_database
+
+# Clean up networking...
+# should this be in nova?
+# FIXED_IP_ADDR in br100
+
+# Clean up files
+#rm -f .stackenv
diff --git a/exercise.sh b/exercise.sh
index 5b3c56e..3516738 100755
--- a/exercise.sh
+++ b/exercise.sh
@@ -17,9 +17,19 @@
 # to refrain from exercising euca.sh use SKIP_EXERCISES=euca
 SKIP_EXERCISES=${SKIP_EXERCISES:-""}
 
-# Locate the scripts we should run
-EXERCISE_DIR=$(dirname "$0")/exercises
-basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
+# comma separated list of script basenames to run
+# to run only euca.sh use RUN_EXERCISES=euca
+basenames=${RUN_EXERCISES:-""}
+
+EXERCISE_DIR=$TOP_DIR/exercises
+
+if [ -z "${basenames}" ] ; then
+    # Locate the scripts we should run
+    basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done)
+else
+    # If RUN_EXERCISES was specified, ignore SKIP_EXERCISES.
+    SKIP_EXERCISES=
+fi
 
 # Track the state of each script
 passes=""
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index a92c0d9..3c83725 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -56,7 +56,7 @@
     if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
         echo "SUCCESS $aggregate_name not present"
     else
-        echo "ERROR found aggregate: $aggregate_name"
+        die $LINENO "found aggregate: $aggregate_name"
         exit -1
     fi
 }
@@ -67,15 +67,14 @@
 AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
 
 # check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
 
 
 # Ensure creating a duplicate fails
 # =================================
 
 if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
-    echo "ERROR could create duplicate aggregate"
-    exit -1
+    die $LINENO "could create duplicate aggregate"
 fi
 
 
@@ -113,7 +112,7 @@
 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
 
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "metadata was not cleared"
 
 nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
 nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
@@ -129,8 +128,7 @@
 nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
 nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
 if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
-    echo "ERROR could add duplicate host to single aggregate"
-    exit -1
+    die $LINENO "could add duplicate host to single aggregate"
 fi
 nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
 nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 679091b..14d0049 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -72,7 +72,7 @@
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -140,7 +140,7 @@
 # Create the bootable volume
 start_time=$(date +%s)
 cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die "Failure creating volume $VOL_NAME"
+    die $LINENO "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
     echo "Volume $VOL_NAME not created"
     exit 1
@@ -150,7 +150,7 @@
 
 # Get volume ID
 VOL_ID=$(cinder list | grep $VOL_NAME  | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Boot instance
 # -------------
@@ -159,7 +159,7 @@
 # <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
 # Leaving the middle two fields blank appears to do-the-right-thing
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
@@ -169,7 +169,7 @@
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -178,7 +178,7 @@
 # --------
 
 # Delete volume backed instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
     echo "Server $VM_NAME not deleted"
     exit 1
@@ -192,7 +192,7 @@
 
 # Delete volume
 start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOLUME_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
     echo "Volume $VOL_NAME not deleted"
     exit 1
@@ -201,7 +201,7 @@
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
 # Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index 12f2732..dce36aa 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -49,21 +49,20 @@
 BUCKET=testbucket
 IMAGE=bundle.img
 truncate -s 5M /tmp/$IMAGE
-euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
+euca-bundle-image -i /tmp/$IMAGE || die $LINENO "Failure bundling image $IMAGE"
 
-euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
+euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die $LINENO "Failure uploading bundle $IMAGE to $BUCKET"
 
 AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
-die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
+die_if_not_set $LINENO AMI "Failure registering $BUCKET/$IMAGE"
 
 # Wait for the image to become available
 if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then
-    echo "Image $AMI not available within $REGISTER_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Image $AMI not available within $REGISTER_TIMEOUT seconds"
 fi
 
 # Clean up
-euca-deregister $AMI || die "Failure deregistering $AMI"
+euca-deregister $AMI || die $LINENO "Failure deregistering $AMI"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 894da74..1e92500 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -136,7 +136,7 @@
 # Swift client
 # ------------
 
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
     if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
         STATUS_SWIFT="Skipped"
     else
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index c84e84e..dd8e56e 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -144,7 +144,8 @@
 # Swift client
 # ------------
 
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
     if [[ "$SKIP_EXERCISES" =~ "swift" ]] ; then
         STATUS_SWIFT="Skipped"
     else
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 8b15da8..50d4744 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -56,68 +56,62 @@
 
 # Find a machine image to boot
 IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Add a secgroup
 if ! euca-describe-groups | grep -q $SECGROUP; then
     euca-add-group -d "$SECGROUP description" $SECGROUP
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+        die $LINENO "Security group not created"
     fi
 fi
 
 # Launch it
 INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
-die_if_not_set INSTANCE "Failure launching instance"
+die_if_not_set $LINENO INSTANCE "Failure launching instance"
 
 # Assure it has booted within a reasonable time
 if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
-    echo "server didn't become active within $RUNNING_TIMEOUT seconds"
-    exit 1
+    die $LINENO "server didn't become active within $RUNNING_TIMEOUT seconds"
 fi
 
 # Volumes
 # -------
 if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
    VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
-   die_if_not_set VOLUME_ZONE "Failure to find zone for volume"
+   die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
    VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2`
-   die_if_not_set VOLUME "Failure to create volume"
+   die_if_not_set $LINENO VOLUME "Failure to create volume"
 
    # Test that volume has been created
    VOLUME=`euca-describe-volumes | cut -f2`
-   die_if_not_set VOLUME "Failure to get volume"
+   die_if_not_set $LINENO VOLUME "Failure to get volume"
 
    # Test volume has become available
    if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-       echo "volume didnt become available within $RUNNING_TIMEOUT seconds"
-       exit 1
+       die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds"
    fi
 
    # Attach volume to an instance
    euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \
-       die "Failure attaching volume $VOLUME to $INSTANCE"
+       die $LINENO "Failure attaching volume $VOLUME to $INSTANCE"
    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then
-       echo "Could not attach $VOLUME to $INSTANCE"
-       exit 1
+       die $LINENO "Could not attach $VOLUME to $INSTANCE"
    fi
 
    # Detach volume from an instance
    euca-detach-volume $VOLUME || \
-       die "Failure detaching volume $VOLUME to $INSTANCE"
+       die $LINENO "Failure detaching volume $VOLUME to $INSTANCE"
     if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-        echo "Could not detach $VOLUME to $INSTANCE"
-        exit 1
+        die $LINENO "Could not detach $VOLUME to $INSTANCE"
     fi
 
     # Remove volume
     euca-delete-volume $VOLUME || \
-        die "Failure to delete volume"
+        die $LINENO "Failure to delete volume"
     if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then
-       echo "Could not delete $VOLUME"
-       exit 1
+       die $LINENO "Could not delete $VOLUME"
     fi
 else
     echo "Volume Tests Skipped"
@@ -125,58 +119,55 @@
 
 # Allocate floating address
 FLOATING_IP=`euca-allocate-address | cut -f2`
-die_if_not_set FLOATING_IP "Failure allocating floating IP"
+die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
 
 # Associate floating address
 euca-associate-address -i $INSTANCE $FLOATING_IP || \
-    die "Failure associating address $FLOATING_IP to $INSTANCE"
+    die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
 
 # Authorize pinging
 euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die "Failure authorizing rule in $SECGROUP"
+    die $LINENO "Failure authorizing rule in $SECGROUP"
 
 # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
 
 # Revoke pinging
 euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die "Failure revoking rule in $SECGROUP"
+    die $LINENO "Failure revoking rule in $SECGROUP"
 
 # Release floating address
 euca-disassociate-address $FLOATING_IP || \
-    die "Failure disassociating address $FLOATING_IP"
+    die $LINENO "Failure disassociating address $FLOATING_IP"
 
 # Wait just a tick for everything above to complete so release doesn't fail
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
 fi
 
 # Release floating address
 euca-release-address $FLOATING_IP || \
-    die "Failure releasing address $FLOATING_IP"
+    die $LINENO "Failure releasing address $FLOATING_IP"
 
 # Wait just a tick for everything above to complete so terminate doesn't fail
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
 fi
 
 # Terminate instance
 euca-terminate-instances $INSTANCE || \
-    die "Failure terminating instance $INSTANCE"
+    die $LINENO "Failure terminating instance $INSTANCE"
 
 # Assure it has terminated within a reasonable time. The behaviour of this
 # case changed with bug/836978. Requesting the status of an invalid instance
 # will now return an error message including the instance id, so we need to
 # filter that out.
 if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then
-    echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
 fi
 
 # Delete secgroup
-euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
+euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 34ab69d..b4e1c42 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -71,7 +71,7 @@
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -83,8 +83,7 @@
 if ! nova secgroup-list | grep -q $SECGROUP; then
     nova secgroup-create $SECGROUP "$SECGROUP description"
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+        die $LINENO "Security group not created"
     fi
 fi
 
@@ -115,7 +114,7 @@
 # Clean-up from previous runs
 nova delete $VM_NAME || true
 if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
+    die $LINENO "server didn't terminate!"
     exit 1
 fi
 
@@ -123,17 +122,16 @@
 # -------------
 
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
+    die $LINENO "server didn't become active!"
 fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -143,17 +141,16 @@
 
 # Allocate a floating IP from the default pool
 FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
+die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
 
 # List floating addresses
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating IP not allocated"
-    exit 1
+    die $LINENO "Floating IP not allocated"
 fi
 
 # Add floating IP to our server
 nova add-floating-ip $VM_UUID $FLOATING_IP || \
-    die "Failure adding floating IP $FLOATING_IP to $VM_NAME"
+    die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
 
 # Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
@@ -161,18 +158,17 @@
 if ! is_service_enabled quantum; then
     # Allocate an IP from second floating pool
     TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
-    die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
+    die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
 
     # list floating addresses
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
-        echo "Floating IP not allocated"
-        exit 1
+        die $LINENO "Floating IP not allocated"
      fi
 fi
 
 # Dis-allow icmp traffic (ping)
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
-    die "Failure deleting security group rule from $SECGROUP"
+    die $LINENO "Failure deleting security group rule from $SECGROUP"
 
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
@@ -186,24 +182,23 @@
 if ! is_service_enabled quantum; then
     # Delete second floating IP
     nova floating-ip-delete $TEST_FLOATING_IP || \
-        die "Failure deleting floating IP $TEST_FLOATING_IP"
+        die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
 fi
 
 # Delete the floating ip
 nova floating-ip-delete $FLOATING_IP || \
-    die "Failure deleting floating IP $FLOATING_IP"
+    die $LINENO "Failure deleting floating IP $FLOATING_IP"
 
 # Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 # Wait for termination
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
+    die $LINENO "Server $VM_NAME not deleted"
 fi
 
 # Delete secgroup
 nova secgroup-delete $SECGROUP || \
-    die "Failure deleting security group $SECGROUP"
+    die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
index c5dae3a..5d778c9 100755
--- a/exercises/horizon.sh
+++ b/exercises/horizon.sh
@@ -36,7 +36,7 @@
 is_service_enabled horizon || exit 55
 
 # can we get the front page
-curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die "Horizon front page not functioning!"
+curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index bc33fe8..5c4b16e 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -264,7 +264,7 @@
         --image $(get_image_id) \
         $NIC \
         $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
-    die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID
+    die_if_not_set $LINENO VM_UUID "Failure launching $TENANT-server$NUM"
     confirm_server_active $VM_UUID
 }
 
@@ -309,8 +309,7 @@
 function shutdown_vms {
     foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%'
     if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
-        echo "Some VMs failed to shutdown"
-        false
+        die $LINENO "Some VMs failed to shutdown"
     fi
 }
 
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index a33c9c6..b73afdf 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -68,7 +68,7 @@
 
 # Delete secgroup
 nova secgroup-delete $SEC_GROUP_NAME || \
-    die "Failure deleting security group $SEC_GROUP_NAME"
+    die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index a75f955..c4ec3e9 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -35,7 +35,7 @@
 
 # If swift is not enabled we exit with exitcode 55 which mean
 # exercise is skipped.
-is_service_enabled swift || exit 55
+is_service_enabled s-proxy || exit 55
 
 # Container name
 CONTAINER=ex-swift
@@ -45,20 +45,20 @@
 # =============
 
 # Check if we have to swift via keystone
-swift stat || die "Failure geting status"
+swift stat || die $LINENO "Failure getting status"
 
 # We start by creating a test container
-swift post $CONTAINER || die "Failure creating container $CONTAINER"
+swift post $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
 
 # add some files into it.
-swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER"
+swift upload $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER"
 
 # list them
-swift list $CONTAINER || die "Failure listing contents of container $CONTAINER"
+swift list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
 
 # And we may want to delete them now that we have tested that
 # everything works.
-swift delete $CONTAINER || die "Failure deleting container $CONTAINER"
+swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 45cb0c8..7913641 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -70,7 +70,7 @@
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -114,25 +114,23 @@
 # Clean-up from previous runs
 nova delete $VM_NAME || true
 if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
-    exit 1
+    die $LINENO "server didn't terminate!"
 fi
 
 # Boot instance
 # -------------
 
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
+    die $LINENO "server didn't become active!"
 fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -142,42 +140,38 @@
 
 # Verify it doesn't exist
 if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
-    echo "Volume $VOL_NAME already exists"
-    exit 1
+    die $LINENO "Volume $VOL_NAME already exists"
 fi
 
 # Create a new volume
 start_time=$(date +%s)
 cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die "Failure creating volume $VOL_NAME"
+    die $LINENO "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not created"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not created"
 fi
 end_time=$(date +%s)
 echo "Completed cinder create in $((end_time - start_time)) seconds"
 
 # Get volume ID
 VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Attach to server
 DEVICE=/dev/vdb
 start_time=$(date +%s)
 nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
-    die "Failure attaching volume $VOL_NAME to $VM_NAME"
+    die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not attached to $VM_NAME"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
 fi
 end_time=$(date +%s)
 echo "Completed volume-attach in $((end_time - start_time)) seconds"
 
 VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
+die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
 if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
-    echo "Volume not attached to correct instance"
-    exit 1
+    die $LINENO "Volume not attached to correct instance"
 fi
 
 # Clean up
@@ -185,33 +179,30 @@
 
 # Detach volume
 start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME"
+nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not detached from $VM_NAME"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
 fi
 end_time=$(date +%s)
 echo "Completed volume-detach in $((end_time - start_time)) seconds"
 
 # Delete volume
 start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not deleted"
 fi
 end_time=$(date +%s)
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
 # Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
+    die $LINENO "Server $VM_NAME not deleted"
 fi
 
 # Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/files/apts/ceilometer-collector b/files/apts/ceilometer-collector
index c67ade3..71007ba 100644
--- a/files/apts/ceilometer-collector
+++ b/files/apts/ceilometer-collector
@@ -1,2 +1,6 @@
 python-pymongo
 mongodb-server
+libnspr4-dev
+pkg-config
+libxml2-dev
+libxslt-dev
\ No newline at end of file
diff --git a/files/apts/general b/files/apts/general
index 0264066..a1fcf3c 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -9,6 +9,7 @@
 git
 lsof # useful when debugging
 openssh-server
+openssl
 vim-nox
 locate # useful when debugging
 python-virtualenv
diff --git a/files/apts/n-vol b/files/apts/n-vol
deleted file mode 100644
index 5db06ea..0000000
--- a/files/apts/n-vol
+++ /dev/null
@@ -1,2 +0,0 @@
-tgt
-lvm2
diff --git a/files/apts/nova b/files/apts/nova
index ba6d8c5..f4615c4 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -18,6 +18,7 @@
 libjs-jquery-tablesorter # Needed for coverage html reports
 vlan
 curl
+genisoimage # required for config_drive
 rabbitmq-server # NOPRIME
 qpidd # dist:precise NOPRIME
 socat # used by ajaxterm
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 4c76c9b..72b5b1e 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -52,7 +52,7 @@
 # Services
 # --------
 
-if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
     NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }")
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api.
@@ -123,7 +123,8 @@
 fi
 
 # Swift
-if [[ "$ENABLED_SERVICES" =~ "swift" ]]; then
+
+if [[ "$ENABLED_SERVICES" =~ "swift" || "$ENABLED_SERVICES" =~ "s-proxy" ]]; then
     SWIFT_USER=$(get_id keystone user-create \
         --name=swift \
         --pass="$SERVICE_PASSWORD" \
@@ -167,9 +168,9 @@
         keystone endpoint-create \
             --region RegionOne \
             --service_id $CEILOMETER_SERVICE \
-            --publicurl "http://$SERVICE_HOST:8777/" \
-            --adminurl "http://$SERVICE_HOST:8777/" \
-            --internalurl "http://$SERVICE_HOST:8777/"
+            --publicurl "http://$SERVICE_HOST:8777" \
+            --adminurl "http://$SERVICE_HOST:8777" \
+            --internalurl "http://$SERVICE_HOST:8777"
     fi
 fi
 
@@ -190,7 +191,7 @@
 fi
 
 # S3
-if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift" ]]; then
+if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         S3_SERVICE=$(get_id keystone service-create \
             --name=s3 \
diff --git a/files/ldap/openstack.ldif b/files/ldap/openstack.ldif
index 00c9861..2b76372 100644
--- a/files/ldap/openstack.ldif
+++ b/files/ldap/openstack.ldif
@@ -20,6 +20,10 @@
 objectClass: organizationalUnit
 ou: Projects
 
+dn: ou=Domains,dc=openstack,dc=org
+objectClass: organizationalUnit
+ou: Domains
+
 dn: cn=9fe2ff9ee4384b1894a90878d3e92bab,ou=Roles,dc=openstack,dc=org
 objectClass: organizationalRole
 ou: _member_
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 8ed74ec..b8ceeb7 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -4,6 +4,7 @@
 git-core
 iputils
 openssh
+openssl
 psmisc
 python-cmd2 # dist:opensuse-12.3
 python-netaddr
diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap
new file mode 100644
index 0000000..46d26f0
--- /dev/null
+++ b/files/rpms-suse/ldap
@@ -0,0 +1,3 @@
+openldap2
+openldap2-client
+python-ldap
diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice
new file mode 100644
index 0000000..c8722b9
--- /dev/null
+++ b/files/rpms-suse/n-spice
@@ -0,0 +1 @@
+python-numpy
diff --git a/files/rpms-suse/n-vol b/files/rpms-suse/n-vol
deleted file mode 100644
index e5b4727..0000000
--- a/files/rpms-suse/n-vol
+++ /dev/null
@@ -1,2 +0,0 @@
-lvm2
-tgt
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 0306716..04af7f3 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -3,6 +3,7 @@
 dnsmasq
 ebtables
 gawk
+genisoimage # required for config_drive
 iptables
 iputils
 kpartx
@@ -34,6 +35,7 @@
 python-mox
 python-mysql
 python-netaddr
+python-numpy # needed by websockify for spice console
 python-paramiko
 python-python-gflags
 python-sqlalchemy-migrate
diff --git a/files/rpms-suse/ryu b/files/rpms-suse/ryu
index 763fd24..90b43a4 100644
--- a/files/rpms-suse/ryu
+++ b/files/rpms-suse/ryu
@@ -2,4 +2,5 @@
 python-setuptools # instead of python-distribute; dist:sle11sp2
 python-Sphinx
 python-gevent
+python-netifaces
 python-python-gflags
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index c5c855c..d7b7ea8 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,2 +1,3 @@
+selinux-policy-targeted
 mongodb-server
 pymongo
diff --git a/files/rpms/general b/files/rpms/general
index e4f143d..fc3412b 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -3,6 +3,7 @@
 euca2ools # only for testing client
 git-core
 openssh-server
+openssl
 psmisc
 pylint
 python-netaddr
diff --git a/files/rpms/n-vol b/files/rpms/n-vol
deleted file mode 100644
index df861aa..0000000
--- a/files/rpms/n-vol
+++ /dev/null
@@ -1,2 +0,0 @@
-lvm2
-scsi-target-utils
diff --git a/files/rpms/nova b/files/rpms/nova
index 568ee7f..7ff926b 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -3,6 +3,7 @@
 dnsmasq-utils # for dhcp_release
 ebtables
 gawk
+genisoimage # required for config_drive
 iptables
 iputils
 kpartx
diff --git a/functions b/functions
index d8b87d4..fe50547 100644
--- a/functions
+++ b/functions
@@ -53,12 +53,19 @@
 }
 
 
-# Prints "message" and exits
-# die "message"
+# Prints line number and "message" then exits
+# die $LINENO "message"
 function die() {
     local exitcode=$?
+    if [ $exitcode == 0 ]; then
+        exitcode=1
+    fi
     set +o xtrace
-    echo $@
+    local msg="[ERROR] $0:$1 $2"
+    echo $msg 1>&2;
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo $msg >> "${SCREEN_LOGDIR}/error.log"
+    fi
     exit $exitcode
 }
 
@@ -66,15 +73,14 @@
 # Checks an environment variable is not set or has length 0 OR if the
 # exit code is non-zero and prints "message" and exits
 # NOTE: env-var is the variable name without a '$'
-# die_if_not_set env-var "message"
+# die_if_not_set $LINENO env-var "message"
 function die_if_not_set() {
     (
         local exitcode=$?
         set +o xtrace
-        local evar=$1; shift
+        local evar=$2; shift
         if ! is_set $evar || [ $exitcode != 0 ]; then
-            echo $@
-            exit -1
+            die $@
         fi
     )
 }
@@ -117,20 +123,63 @@
 }
 
 
+# Get the default value for HOST_IP
+# get_default_host_ip fixed_range floating_range host_ip_iface host_ip
+function get_default_host_ip() {
+    local fixed_range=$1
+    local floating_range=$2
+    local host_ip_iface=$3
+    local host_ip=$4
+
+    # Find the interface used for the default route
+    host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
+    # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
+    if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
+        host_ip=""
+        host_ips=`LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
+        for IP in $host_ips; do
+            # Attempt to filter out IP addresses that are part of the fixed and
+            # floating range. Note that this method only works if the ``netaddr``
+            # python library is installed. If it is not installed, an error
+            # will be printed and the first IP from the interface will be used.
+            # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
+            # address.
+            if ! (address_in_net $IP $fixed_range || address_in_net $IP $floating_range); then
+                host_ip=$IP
+                break;
+            fi
+        done
+    fi
+    echo $host_ip
+}
+
+
+function _get_package_dir() {
+    local pkg_dir
+    if is_ubuntu; then
+        pkg_dir=$FILES/apts
+    elif is_fedora; then
+        pkg_dir=$FILES/rpms
+    elif is_suse; then
+        pkg_dir=$FILES/rpms-suse
+    else
+        exit_distro_not_supported "list of packages"
+    fi
+    echo "$pkg_dir"
+}
+
 # get_packages() collects a list of package names of any type from the
 # prerequisite files in ``files/{apts|rpms}``.  The list is intended
 # to be passed to a package installer such as apt or yum.
 #
-# Only packages required for the services in ``ENABLED_SERVICES`` will be
+# Only packages required for the services in 1st argument will be
 # included.  Two bits of metadata are recognized in the prerequisite files:
 # - ``# NOPRIME`` defers installation to be performed later in stack.sh
 # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
 #   of the package to the distros listed.  The distro names are case insensitive.
-#
-# Uses globals ``ENABLED_SERVICES``
-# get_packages dir
 function get_packages() {
-    local package_dir=$1
+    local services=$1
+    local package_dir=$(_get_package_dir)
     local file_to_parse
     local service
 
@@ -141,7 +190,7 @@
     if [[ -z "$DISTRO" ]]; then
         GetDistro
     fi
-    for service in general ${ENABLED_SERVICES//,/ }; do
+    for service in general ${services//,/ }; do
         # Allow individual services to specify dependencies
         if [[ -e ${package_dir}/${service} ]]; then
             file_to_parse="${file_to_parse} $service"
@@ -163,6 +212,10 @@
             if [[ ! $file_to_parse =~ ceilometer ]]; then
                 file_to_parse="${file_to_parse} ceilometer"
             fi
+        elif [[ $service == s-* ]]; then
+            if [[ ! $file_to_parse =~ swift ]]; then
+                file_to_parse="${file_to_parse} swift"
+            fi
         elif [[ $service == n-* ]]; then
             if [[ ! $file_to_parse =~ nova ]]; then
                 file_to_parse="${file_to_parse} nova"
@@ -253,6 +306,8 @@
             if [[ $? -eq 0 ]]; then
                 os_VENDOR="openSUSE"
             fi
+        elif [[ $os_VENDOR == "openSUSE project" ]]; then
+            os_VENDOR="openSUSE"
         elif [[ $os_VENDOR =~ Red.*Hat ]]; then
             os_VENDOR="Red Hat"
         fi
@@ -406,12 +461,10 @@
     fi
 
     if [ $# -gt 0 ]; then
-        echo "Support for $DISTRO is incomplete: no support for $@"
+        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
     else
-        echo "Support for $DISTRO is incomplete."
+        die $LINENO "Support for $DISTRO is incomplete."
     fi
-
-    exit 1
 }
 
 
@@ -537,6 +590,56 @@
     fi
 }
 
+# Get a multiple line option from an INI file
+# iniget_multiline config-file section option
+function iniget_multiline() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local values
+    values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file")
+    echo ${values}
+}
+
+# Set a multiple line option in an INI file
+# iniset_multiline config-file section option value1 value2 value3 ...
+function iniset_multiline() {
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+    local values
+    for v in $@; do
+        # The later sed command inserts each new value in the line next to
+        # the section identifier, which causes the values to be inserted in
+        # the reverse order. Do a reverse here to keep the original order.
+        values="$v ${values}"
+    done
+    if ! grep -q "^\[$section\]" "$file"; then
+        # Add section at the end
+        echo -e "\n[$section]" >>"$file"
+    else
+        # Remove old values
+        sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file"
+    fi
+    # Add new ones
+    for v in $values; do
+        sed -i -e "/^\[$section\]/ a\\
+$option = $v
+" "$file"
+    done
+}
+
+# Append a new option in an ini file without replacing the old value
+# iniadd config-file section option value1 value2 value3 ...
+function iniadd() {
+    local file=$1
+    local section=$2
+    local option=$3
+    shift 3
+    local values="$(iniget_multiline $file $section $option) $@"
+    iniset_multiline $file $section $option $values
+}
 
 # is_service_enabled() checks if the service(s) specified as arguments are
 # enabled by the user in ``ENABLED_SERVICES``.
@@ -550,6 +653,9 @@
 #   **ceilometer** returns true if any service enabled start with **ceilometer**
 #   **glance** returns true if any service enabled start with **g-**
 #   **quantum** returns true if any service enabled start with **q-**
+#   **swift** returns true if any service enabled start with **s-**
+#   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
+#   **s-** services will be enabled. This will be deprecated in the future.
 #
 # Uses global ``ENABLED_SERVICES``
 # is_service_enabled service [service ...]
@@ -562,6 +668,8 @@
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
         [[ ${service} == "quantum" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
+        [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
+        [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
     done
     return 1
 }
@@ -667,6 +775,21 @@
 }
 
 
+# Distro-agnostic package uninstaller
+# uninstall_package package [package ...]
+function uninstall_package() {
+    if is_ubuntu; then
+        apt_get purge "$@"
+    elif is_fedora; then
+        yum remove -y "$@"
+    elif is_suse; then
+        rpm -e "$@"
+    else
+        exit_distro_not_supported "uninstalling packages"
+    fi
+}
+
+
 # Distro-agnostic function to tell if a package is installed
 # is_package_installed package [package ...]
 function is_package_installed() {
@@ -1059,6 +1182,7 @@
         # No backends registered means this is likely called from ``localrc``
         # This is now deprecated usage
         DATABASE_TYPE=$1
+        DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc\n"
     else
         # This should no longer get called...here for posterity
         use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
@@ -1131,9 +1255,9 @@
     fi
     if ! timeout $boot_timeout sh -c "$check_command"; then
         if [[ "$expected" = "True" ]]; then
-            echo "[Fail] Couldn't ping server"
+            die $LINENO "[Fail] Couldn't ping server"
         else
-            echo "[Fail] Could ping server"
+            die $LINENO "[Fail] Could ping server"
         fi
         exit 1
     fi
@@ -1157,8 +1281,7 @@
     local ACTIVE_TIMEOUT=$5
     local probe_cmd=""
     if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
-        echo "server didn't become ssh-able!"
-        exit 1
+        die $LINENO "server didn't become ssh-able!"
     fi
 }
 
diff --git a/lib/baremetal b/lib/baremetal
index 2659386..5326dd1 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -33,7 +33,7 @@
 # baremetal driver uses that to push a disk image onto the node(s).
 #
 # Below we define various defaults which control the behavior of the
-# baremetal compute service, and inform it of the hardware it will contorl.
+# baremetal compute service, and inform it of the hardware it will control.
 #
 # Below that, various functions are defined, which are called by devstack
 # in the following order:
@@ -63,7 +63,7 @@
 
 # sub-driver to use for remote power management
 # - nova.virt.baremetal.fake.FakePowerManager, for manual power control
-# - nova.virt.baremetal.ipmi.Ipmi, for remote IPMI
+# - nova.virt.baremetal.ipmi.IPMI, for remote IPMI
 # - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
 BM_POWER_MANAGER=${BM_POWER_MANAGER:-nova.virt.baremetal.fake.FakePowerManager}
 
@@ -258,9 +258,10 @@
     nova flavor-create $BM_FLAVOR_NAME $BM_FLAVOR_ID \
             $BM_FLAVOR_RAM $BM_FLAVOR_ROOT_DISK $BM_FLAVOR_CPU
     nova flavor-key $BM_FLAVOR_NAME set \
-            cpu_arch=$BM_FLAVOR_ARCH \
-            deploy_kernel_id=$aki \
-            deploy_ramdisk_id=$ari
+            "cpu_arch"="$BM_FLAVOR_ARCH" \
+            "baremetal:deploy_kernel_id"="$aki" \
+            "baremetal:deploy_ramdisk_id"="$ari"
+
 }
 
 # pull run-time kernel/ramdisk out of disk image and load into glance
@@ -394,7 +395,7 @@
        ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
        ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
 
-    # override DEFAULT_IMAGE_NAME so that tempest can find the image 
+    # override DEFAULT_IMAGE_NAME so that tempest can find the image
     # that we just uploaded in glance
     DEFAULT_IMAGE_NAME="${IMAGE_NAME%.img}"
 }
diff --git a/lib/ceilometer b/lib/ceilometer
index e890ff9..d90694c 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -64,17 +64,11 @@
     [ ! -d $CEILOMETER_API_LOG_DIR ] &&  sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR
     sudo chown $USER $CEILOMETER_API_LOG_DIR
 
-    if is_service_enabled rabbit ; then
-        iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
-        iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST
-        iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-    elif is_service_enabled qpid ; then
-        iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_qpid'
-    fi
+    iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
-    iniset $CEILOMETER_CONF DEFAULT sql_connection $BASE_SQL_CONN/nova?charset=utf8
+    iniset $CEILOMETER_CONF DEFAULT `database_connection_url nova`
 
     # Install the policy file for the API server
     cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR
@@ -94,9 +88,20 @@
     iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
 
+    configure_mongodb
+
     cleanup_ceilometer
 }
 
+function configure_mongodb() {
+    if is_fedora ; then
+        # ensure smallfiles selected to minimize freespace requirements
+        sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
+
+        restart_service mongod
+    fi
+}
+
 # init_ceilometer() - Initialize etc.
 function init_ceilometer() {
     # Create cache dir
diff --git a/lib/cinder b/lib/cinder
index c8291a2..b3e1904 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -50,8 +50,13 @@
     CINDER_BIN_DIR=$(get_python_exec_prefix)
 fi
 
-# Name of the lvm volume group to use/create for iscsi volumes
+# Support for multi lvm backend configuration (default is no support)
+CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
+
+# Name of the lvm volume groups to use/create for iscsi volumes
+# VOLUME_GROUP2 is used only if CINDER_MULTI_LVM_BACKEND = True
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_GROUP2=${VOLUME_GROUP2:-stack-volumes2}
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 
 # _clean_volume_group removes all cinder volumes from the specified volume group
@@ -106,6 +111,9 @@
 
     # Campsite rule: leave behind a volume group at least as clean as we found it
     _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+    if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+        _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX
+    fi
 }
 
 # configure_cinder() - Set config files, create data dirs, etc
@@ -163,13 +171,22 @@
 
     cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF
     iniset $CINDER_CONF DEFAULT auth_strategy keystone
+    iniset $CINDER_CONF DEFAULT debug True
     iniset $CINDER_CONF DEFAULT verbose True
-    iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
-    iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
+    if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+        iniset $CINDER_CONF DEFAULT enabled_backends lvmdriver-1,lvmdriver-2
+        iniset $CINDER_CONF lvmdriver-1 volume_group $VOLUME_GROUP
+        iniset $CINDER_CONF lvmdriver-1 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
+        iniset $CINDER_CONF lvmdriver-1 volume_backend_name LVM_iSCSI
+        iniset $CINDER_CONF lvmdriver-2 volume_group $VOLUME_GROUP2
+        iniset $CINDER_CONF lvmdriver-2 volume_driver cinder.volume.drivers.lvm.LVMISCSIDriver
+        iniset $CINDER_CONF lvmdriver-2 volume_backend_name LVM_iSCSI
+    else
+        iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
+        iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
+    fi
     iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
-    local dburl
-    database_connection_url dburl cinder
-    iniset $CINDER_CONF DEFAULT sql_connection $dburl
+    iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder`
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
     iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
     iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions
@@ -263,12 +280,14 @@
 }
 
 create_cinder_volume_group() {
-    # Configure a default volume group called '`stack-volumes`' for the volume
-    # service if it does not yet exist.  If you don't wish to use a file backed
-    # volume group, create your own volume group called ``stack-volumes`` before
-    # invoking ``stack.sh``.
+    # According to the CINDER_MULTI_LVM_BACKEND value, configure one or two default volume
+    # groups called ``stack-volumes`` (and ``stack-volumes2``) for the volume
+    # service if it (they) does (do) not yet exist. If you don't wish to use a
+    # file backed volume group, create your own volume group called ``stack-volumes``
+    # and ``stack-volumes2`` before invoking ``stack.sh``.
     #
-    # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``.
+    # By default, the two backing files are 5G in size, and are stored in
+    # ``/opt/stack/data``.
 
     if ! sudo vgs $VOLUME_GROUP; then
         VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
@@ -283,6 +302,23 @@
             sudo vgcreate $VOLUME_GROUP $DEV
         fi
     fi
+    if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+        # Set up the second volume group if CINDER_MULTI_LVM_BACKEND is enabled
+
+        if ! sudo vgs $VOLUME_GROUP2; then
+            VOLUME_BACKING_FILE2=${VOLUME_BACKING_FILE2:-$DATA_DIR/${VOLUME_GROUP2}-backing-file}
+
+            # Only create if the file doesn't already exist
+            [[ -f $VOLUME_BACKING_FILE2 ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE2
+
+            DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE2`
+
+            # Only create if the loopback device doesn't contain $VOLUME_GROUP2
+            if ! sudo vgs $VOLUME_GROUP2; then
+                sudo vgcreate $VOLUME_GROUP2 $DEV
+            fi
+        fi
+    fi
 
     mkdir -p $CINDER_STATE_PATH/volumes
 }
@@ -314,6 +350,9 @@
             sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
             # Start with a clean volume group
             _clean_volume_group $VOLUME_GROUP $VOLUME_NAME_PREFIX
+            if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
+                _clean_volume_group $VOLUME_GROUP2 $VOLUME_NAME_PREFIX
+            fi
         fi
     fi
 
diff --git a/lib/database b/lib/database
index 4fba7c2..79b77a2 100644
--- a/lib/database
+++ b/lib/database
@@ -29,20 +29,6 @@
 # Sourcing the database libs sets DATABASE_BACKENDS with the available list
 for f in $TOP_DIR/lib/databases/*; do source $f; done
 
-# If ``DATABASE_TYPE`` is defined here it's because the user has it in ``localrc``
-# or has called ``use_database``.  Both are deprecated so let's fix it up for now.
-if [[ -n $DATABASE_TYPE ]]; then
-    # This is now deprecated usage, set up a warning and try to be
-    # somewhat backward compatible for now.
-    DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; DATABASE_TYPE or use_database is deprecated localrc\n"
-    if [[ ! $ENABLED_SERVICES =~ $DATABASE_TYPE ]]; then
-        # It's not in enabled services but user has attempted to select a
-        # database, so just add it now
-        ENABLED_SERVICES+=,$DATABASE_TYPE
-        unset DATABASE_TYPE
-    fi
-fi
-
 # ``DATABASE_BACKENDS`` now contains a list of the supported databases
 # Look in ``ENABLED_SERVICES`` to see if one has been selected
 for db in $DATABASE_BACKENDS; do
@@ -56,6 +42,11 @@
 # This is not an error as multi-node installs will do this on the compute nodes
 
 
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database {
+    cleanup_database_$DATABASE_TYPE
+}
+
 # Set the database type based on the configuration
 function initialize_database_backends {
     for backend in $DATABASE_BACKENDS; do
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 94aedc6..0633ab0 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -10,6 +10,24 @@
 
 register_database mysql
 
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database_mysql {
+    if is_ubuntu; then
+        # Get ruthless with mysql
+        stop_service $MYSQL
+        sudo aptitude purge -y ~nmysql-server
+        sudo rm -rf /var/lib/mysql
+        return
+    elif is_fedora; then
+        MYSQL=mysqld
+    elif is_suse; then
+        MYSQL=mysql
+    else
+        return
+    fi
+    stop_service $MYSQL
+}
+
 function recreate_database_mysql {
     local db=$1
     local charset=$2
@@ -115,9 +133,8 @@
 }
 
 function database_connection_url_mysql {
-    local output=$1
-    local db=$2
-    eval "$output=$BASE_SQL_CONN/$db?charset=utf8"
+    local db=$1
+    echo "$BASE_SQL_CONN/$db?charset=utf8"
 }
 
 # Restore xtrace
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 2c37f49..efc206f 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -10,6 +10,20 @@
 
 register_database postgresql
 
+# Get rid of everything enough to cleanly change database backends
+function cleanup_database_postgresql {
+    stop_service postgresql
+    if is_ubuntu; then
+        # Get ruthless with postgresql
+        sudo aptitude purge -y  ~npostgresql
+        return
+    elif is_fedora; then
+        uninstall_package postgresql-server
+    else
+        return
+    fi
+}
+
 function recreate_database_postgresql {
     local db=$1
     local charset=$2
@@ -70,9 +84,8 @@
 }
 
 function database_connection_url_postgresql {
-    local output=$1
-    local db=$2
-    eval "$output=$BASE_SQL_CONN/$db?client_encoding=utf8"
+    local db=$1
+    echo "$BASE_SQL_CONN/$db?client_encoding=utf8"
 }
 
 # Restore xtrace
diff --git a/lib/glance b/lib/glance
index 80d3902..edf6982 100644
--- a/lib/glance
+++ b/lib/glance
@@ -59,8 +59,7 @@
 function cleanup_glance() {
     # kill instances (nova)
     # delete image files (glance)
-    # This function intentionally left blank
-    :
+    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
 }
 
 # configure_glanceclient() - Set config files, create data dirs, etc
@@ -81,8 +80,7 @@
     cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug True
     inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
-    local dburl
-    database_connection_url dburl glance
+    local dburl=`database_connection_url glance`
     iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
@@ -114,9 +112,8 @@
         iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid
     elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
         iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit
-        iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST
-        iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
     fi
+    iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
     iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
 
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
@@ -187,8 +184,7 @@
     screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
-      echo "g-api did not start"
-      exit 1
+      die $LINENO "g-api did not start"
     fi
 }
 
diff --git a/lib/heat b/lib/heat
index 5b8b360..56d6f39 100644
--- a/lib/heat
+++ b/lib/heat
@@ -117,9 +117,7 @@
     iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
     iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
     iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
-    local dburl
-    database_connection_url dburl heat
-    iniset $HEAT_ENGINE_CONF DEFAULT sql_connection $dburl
+    iniset $HEAT_ENGINE_CONF DEFAULT sql_connection `database_connection_url heat`
     iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random`
 
     iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT
diff --git a/lib/horizon b/lib/horizon
index 9180370..9c96b58 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -34,6 +34,24 @@
 APACHE_USER=${APACHE_USER:-$USER}
 APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
 
+# utility method of setting python option
+function _horizon_config_set() {
+    local file=$1
+    local section=$2
+    local option=$3
+    local value=$4
+
+    if grep -q "^$section" $file; then
+        line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+        if [ -n "$line" ]; then
+            sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
+        else
+            sed -i -e "/^$section/ a\n    '$option': $value,\n" $file
+        fi
+    else
+        echo -e "\n\n$section = {\n    '$option': $value,\n}" >> $file
+    fi
+}
 
 # Entry Points
 # ------------
@@ -61,6 +79,11 @@
     local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $FILES/horizon_settings.py $local_settings
 
+    # enable loadbalancer dashboard in case service is enabled
+    if is_service_enabled q-lbaas; then
+        _horizon_config_set $local_settings OPENSTACK_QUANTUM_NETWORK enable_lb True
+    fi
+
     # Initialize the horizon database (it stores sessions and notices shown to
     # users).  The user system is external (keystone).
     cd $HORIZON_DIR
diff --git a/lib/keystone b/lib/keystone
index a1a57f8..17e0866 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -31,6 +31,7 @@
 KEYSTONE_DIR=$DEST/keystone
 KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone}
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
+KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone}
 
 KEYSTONECLIENT_DIR=$DEST/python-keystoneclient
@@ -88,11 +89,18 @@
     if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
         cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
         cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
+        if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then
+            cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI"
+        fi
+    fi
+    if [[ -f "$KEYSTONE_PASTE_INI" ]]; then
+        iniset "$KEYSTONE_CONF" paste_deploy config_file "$KEYSTONE_PASTE_INI"
+    else
+        # compatibility with mixed cfg and paste.deploy configuration
+        KEYSTONE_PASTE_INI="$KEYSTONE_CONF"
     fi
 
     # Rewrite stock ``keystone.conf``
-    local dburl
-    database_connection_url dburl keystone
 
     if is_service_enabled ldap; then
         #Set all needed ldap values
@@ -123,14 +131,8 @@
 
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
     iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT"
-    iniset $KEYSTONE_CONF sql connection $dburl
+    iniset $KEYSTONE_CONF sql connection `database_connection_url keystone`
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
-    sed -e "
-        /^pipeline.*ec2_extension crud_/s|ec2_extension crud_extension|ec2_extension s3_extension crud_extension|;
-    " -i $KEYSTONE_CONF
-
-    # Append the S3 bits
-    iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
 
     if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
         iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token
@@ -146,7 +148,7 @@
         cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
 
         # Add swift endpoints to service catalog if swift is enabled
-        if is_service_enabled swift; then
+        if is_service_enabled s-proxy; then
             echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
             echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
             echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
@@ -323,8 +325,7 @@
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then
-      echo "keystone did not start"
-      exit 1
+      die $LINENO "keystone did not start"
     fi
 
     # Start proxies if enabled
diff --git a/lib/ldap b/lib/ldap
index 5cb4534..0a0d197 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -16,13 +16,11 @@
     echo "os_VENDOR is $os_VENDOR"
     printf "installing"
     if is_ubuntu; then
-        echo "os vendor is Ubuntu"
         LDAP_OLCDB_NUMBER=1
         LDAP_ROOTPW_COMMAND=replace
         sudo DEBIAN_FRONTEND=noninteractive apt-get install slapd ldap-utils
         #automatically starts LDAP on ubuntu so no need to call start_ldap
-    elif is_fedora; then
-        echo "os vendor is Fedora"
+    elif is_fedora || is_suse; then
         LDAP_OLCDB_NUMBER=2
         LDAP_ROOTPW_COMMAND=add
         start_ldap
diff --git a/lib/nova b/lib/nova
index 849ec57..23346b7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -106,6 +106,8 @@
         # Clean out the instances directory.
         sudo rm -rf $NOVA_INSTANCES_PATH/*
     fi
+
+    sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
 }
 
 # configure_novaclient() - Set config files, create data dirs, etc
@@ -308,9 +310,6 @@
                 sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
             fi
         fi
-
-        # Clean up old instances
-        cleanup_nova
     fi
 }
 
@@ -371,18 +370,15 @@
     iniset $NOVA_CONF DEFAULT compute_scheduler_driver "$SCHEDULER"
     iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
     iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
-    iniset $NOVA_CONF DEFAULT fixed_range "$FIXED_RANGE"
+    iniset $NOVA_CONF DEFAULT fixed_range ""
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
     iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
     iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
     iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions"
     iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
-    local dburl
-    database_connection_url dburl nova
-    iniset $NOVA_CONF DEFAULT sql_connection "$dburl"
+    iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova`
     if is_baremetal; then
-        database_connection_url dburl nova_bm
-        iniset $NOVA_CONF baremetal sql_connection $dburl
+        iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm`
     fi
     iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
     iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
@@ -428,8 +424,7 @@
     if is_service_enabled ceilometer; then
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
         iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
-        iniset $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier"
-        iniset $NOVA_CONF DEFAULT notification_driver "ceilometer.compute.nova_notifier"
+        iniset_multiline $NOVA_CONF DEFAULT notification_driver "nova.openstack.common.notifier.rpc_notifier" "ceilometer.compute.nova_notifier"
     fi
 
 
@@ -542,8 +537,7 @@
     screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
     echo "Waiting for nova-api to start..."
     if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
-      echo "nova-api did not start"
-      exit 1
+      die $LINENO "nova-api did not start"
     fi
 
     # Start proxies if enabled
diff --git a/lib/quantum b/lib/quantum
index 3466162..7b31323 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -89,7 +89,7 @@
 # Meta data IP
 Q_META_DATA_IP=${Q_META_DATA_IP:-$HOST_IP}
 # Allow Overlapping IP among subnets
-Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-False}
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
 # Use quantum-debug command
 Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
 # The name of the default q-l3 router
@@ -176,6 +176,11 @@
 # Please refer to lib/quantum_plugins/README.md for details.
 source $TOP_DIR/lib/quantum_plugins/$Q_PLUGIN
 
+# Agent loadbalancer service plugin functions
+# -------------------------------------------
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/quantum_plugins/agent_loadbalancer
+
 # Entry Points
 # ------------
 
@@ -185,6 +190,10 @@
     _configure_quantum_common
     iniset_rpc_backend quantum $QUANTUM_CONF DEFAULT
 
+    # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
+    if is_service_enabled q-lbaas; then
+        _configure_quantum_lbaas
+    fi
     if is_service_enabled q-svc; then
         _configure_quantum_service
     fi
@@ -202,8 +211,6 @@
     fi
 
     _configure_quantum_debug_command
-
-    _cleanup_quantum
 }
 
 function create_nova_conf_quantum() {
@@ -351,18 +358,21 @@
     screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
     echo "Waiting for Quantum to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
-      echo "Quantum did not start"
-      exit 1
+      die $LINENO "Quantum did not start"
     fi
 }
 
 # Start running processes, including screen
 function start_quantum_agents() {
     # Start up the quantum agents if enabled
-    screen_it q-agt "python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    screen_it q-dhcp "python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
-    screen_it q-meta "python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
-    screen_it q-l3 "python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+    screen_it q-agt "cd $QUANTUM_DIR && python $AGENT_BINARY --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    screen_it q-dhcp "cd $QUANTUM_DIR && python $AGENT_DHCP_BINARY --config-file $QUANTUM_CONF --config-file=$Q_DHCP_CONF_FILE"
+    screen_it q-l3 "cd $QUANTUM_DIR && python $AGENT_L3_BINARY --config-file $QUANTUM_CONF --config-file=$Q_L3_CONF_FILE"
+    screen_it q-meta "cd $QUANTUM_DIR && python $AGENT_META_BINARY --config-file $QUANTUM_CONF --config-file=$Q_META_CONF_FILE"
+
+    if is_service_enabled q-lbaas; then
+        screen_it q-lbaas "cd $QUANTUM_DIR && python $AGENT_LBAAS_BINARY --config-file $QUANTUM_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
+    fi
 }
 
 # stop_quantum() - Stop running processes (non-screen)
@@ -373,9 +383,9 @@
     fi
 }
 
-# _cleanup_quantum() - Remove residual data files, anything left over from previous
+# cleanup_quantum() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
-function _cleanup_quantum() {
+function cleanup_quantum() {
     :
 }
 
@@ -396,8 +406,7 @@
     quantum_plugin_configure_common
 
     if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
-        echo "Quantum plugin not set.. exiting"
-        exit 1
+        die $LINENO "Quantum plugin not set.. exiting"
     fi
 
     # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR``
@@ -405,9 +414,8 @@
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
 
-    database_connection_url dburl $Q_DB_NAME
-    iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection $dburl
-    unset dburl
+    iniset /$Q_PLUGIN_CONF_FILE DATABASE sql_connection `database_connection_url $Q_DB_NAME`
+    iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
 
     _quantum_setup_rootwrap
 }
@@ -485,6 +493,13 @@
     _quantum_setup_keystone $Q_META_CONF_FILE DEFAULT set_auth_url
 }
 
+function _configure_quantum_lbaas()
+{
+    quantum_agent_lbaas_install_agent_packages
+    quantum_agent_lbaas_configure_common
+    quantum_agent_lbaas_configure_agent
+}
+
 # _configure_quantum_plugin_agent() - Set config files for quantum plugin agent
 # It is called when q-agt is enabled.
 function _configure_quantum_plugin_agent() {
@@ -508,16 +523,19 @@
     if is_service_enabled $DATABASE_BACKENDS; then
         recreate_database $Q_DB_NAME utf8
     else
-        echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
-        exit 1
+        die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
     fi
 
     # Update either configuration file with plugin
     iniset $QUANTUM_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
 
+    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
+        iniset $QUANTUM_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
+    fi
+
     iniset $QUANTUM_CONF DEFAULT verbose True
     iniset $QUANTUM_CONF DEFAULT debug True
-    iniset $QUANTUM_CONF DEFAULT state_path $DATA_DIR/quantum
+    iniset $QUANTUM_CONF DEFAULT policy_file $Q_POLICY_FILE
     iniset $QUANTUM_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
 
     iniset $QUANTUM_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
@@ -659,11 +677,10 @@
     fi
     if ! timeout $timeout_sec sh -c "$check_command"; then
         if [[ "$expected" = "True" ]]; then
-            echo "[Fail] Couldn't ping server"
+            die $LINENO "[Fail] Couldn't ping server"
         else
-            echo "[Fail] Could ping server"
+            die $LINENO "[Fail] Could ping server"
         fi
-        exit 1
     fi
 }
 
@@ -677,8 +694,7 @@
     local probe_cmd = ""
     probe_cmd=`_get_probe_cmd_prefix $from_net`
     if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then
-        echo "server didn't become ssh-able!"
-        exit 1
+        die $LINENO "server didn't become ssh-able!"
     fi
 }
 
diff --git a/lib/quantum_plugins/README.md b/lib/quantum_plugins/README.md
index a66d35a..5411de0 100644
--- a/lib/quantum_plugins/README.md
+++ b/lib/quantum_plugins/README.md
@@ -18,7 +18,7 @@
 * ``quantum_plugin_create_nova_conf`` :
   set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf
   e.g.
-  NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+  NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
 * ``quantum_plugin_install_agent_packages`` :
   install packages that is specific to plugin agent
   e.g.
diff --git a/lib/quantum_plugins/agent_loadbalancer b/lib/quantum_plugins/agent_loadbalancer
new file mode 100644
index 0000000..87e7aaa
--- /dev/null
+++ b/lib/quantum_plugins/agent_loadbalancer
@@ -0,0 +1,48 @@
+# Quantum loadbalancer plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+AGENT_LBAAS_BINARY="$QUANTUM_DIR/bin/quantum-lbaas-agent"
+
+function quantum_agent_lbaas_install_agent_packages() {
+    if is_ubuntu || is_fedora; then
+        install_package haproxy
+    elif is_suse; then
+        ### FIXME: Find out if package can be pushed to Factory
+        echo "HAProxy packages can be installed from server:http project in OBS"
+    fi
+}
+
+function quantum_agent_lbaas_configure_common() {
+    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+        Q_SERVICE_PLUGIN_CLASSES="quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+    else
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,quantum.plugins.services.agent_loadbalancer.plugin.LoadBalancerPlugin"
+    fi
+}
+
+function quantum_agent_lbaas_configure_agent() {
+    LBAAS_AGENT_CONF_PATH=/etc/quantum/plugins/services/agent_loadbalancer
+    mkdir -p $LBAAS_AGENT_CONF_PATH
+
+    LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
+
+    cp $QUANTUM_DIR/etc/lbaas_agent.ini /$LBAAS_AGENT_CONF_FILENAME
+
+    if [[ $Q_PLUGIN == 'linuxbridge' || $Q_PLUGIN == 'brocade' ]]; then
+        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.BridgeInterfaceDriver"
+    else
+        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT interface_driver "quantum.agent.linux.interface.OVSInterfaceDriver"
+    fi
+
+    if is_fedora; then
+        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
+    fi
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/quantum_plugins/bigswitch_floodlight b/lib/quantum_plugins/bigswitch_floodlight
index 2c928be..7d3fd96 100644
--- a/lib/quantum_plugins/bigswitch_floodlight
+++ b/lib/quantum_plugins/bigswitch_floodlight
@@ -9,7 +9,7 @@
 source $TOP_DIR/lib/quantum_thirdparty/bigswitch_floodlight     # for third party service specific configuration values
 
 function quantum_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
 }
 
 function quantum_plugin_install_agent_packages() {
diff --git a/lib/quantum_plugins/brocade b/lib/quantum_plugins/brocade
index c372c19..ac91143 100644
--- a/lib/quantum_plugins/brocade
+++ b/lib/quantum_plugins/brocade
@@ -10,7 +10,7 @@
 }
 
 function quantum_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"}
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
 }
 
 function quantum_plugin_install_agent_packages() {
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 6d5d4e0..11bc585 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -11,7 +11,7 @@
 }
 
 function quantum_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.QuantumLinuxBridgeVIFDriver"}
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
 }
 
 function quantum_plugin_install_agent_packages() {
@@ -30,11 +30,12 @@
 }
 
 function quantum_plugin_configure_dhcp_agent() {
-    :
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
 function quantum_plugin_configure_l3_agent() {
     iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function quantum_plugin_configure_plugin_agent() {
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index bc9a36f..8c150b1 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -19,8 +19,7 @@
         conn=(${NVP_CONTROLLER_CONNECTION//\:/ })
         OVS_MGR_IP=${conn[0]}
     else
-        echo "Error - No controller specified. Unable to set a manager for OVS"
-        exit 1
+        die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
     fi
     sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
 }
@@ -63,14 +62,12 @@
 
 function quantum_plugin_configure_l3_agent() {
    # Nicira plugin does not run L3 agent
-   echo "ERROR - q-l3 should must not be executed with Nicira plugin!"
-   exit 1
+   die $LINENO "q-l3 should must not be executed with Nicira plugin!"
 }
 
 function quantum_plugin_configure_plugin_agent() {
    # Nicira plugin does not run L2 agent
-   echo "ERROR - q-agt must not be executed with Nicira plugin!"
-   exit 1
+   die $LINENO "q-agt must not be executed with Nicira plugin!"
 }
 
 function quantum_plugin_configure_service() {
@@ -93,8 +90,7 @@
         if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID
         else
-            echo "ERROR - The nicira plugin won't work without a default transport zone."
-            exit 1
+            die $LINENO "The nicira plugin won't work without a default transport zone."
         fi
         if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
@@ -114,8 +110,7 @@
             # Only 1 controller can be specified in this case
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION
         else
-            echo "ERROR - The nicira plugin needs at least an NVP controller."
-            exit 1
+            die $LINENO "The nicira plugin needs at least an NVP controller."
         fi
         if [[ "$NVP_USER" != "" ]]; then
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index 726c6c3..dda1239 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -8,7 +8,7 @@
 source $TOP_DIR/lib/quantum_plugins/ovs_base
 
 function quantum_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver"}
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
         iniset $NOVA_CONF DEFAULT xenapi_vif_driver nova.virt.xenapi.vif.XenAPIOpenVswitchDriver
         iniset $NOVA_CONF DEFAULT xenapi_ovs_integration_bridge $FLAT_NETWORK_BRIDGE
@@ -36,6 +36,7 @@
 
 function quantum_plugin_configure_l3_agent() {
     _quantum_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function quantum_plugin_configure_plugin_agent() {
@@ -49,9 +50,7 @@
         # REVISIT - also check kernel module support for GRE and patch ports
         OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
         if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
-            echo "You are running OVS version $OVS_VERSION."
-            echo "OVS 1.4+ is required for tunneling between multiple hosts."
-            exit 1
+            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
         fi
         iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
         iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
diff --git a/lib/quantum_plugins/ovs_base b/lib/quantum_plugins/ovs_base
index 8563674..ab988d9 100644
--- a/lib/quantum_plugins/ovs_base
+++ b/lib/quantum_plugins/ovs_base
@@ -24,10 +24,13 @@
     if is_ubuntu; then
         kernel_version=`cat /proc/version | cut -d " " -f3`
         install_package make fakeroot dkms openvswitch-switch openvswitch-datapath-dkms linux-headers-$kernel_version
-    else
-        ### FIXME(dtroyer): Find RPMs for OpenVSwitch
-        echo "OpenVSwitch packages need to be located"
-        # Fedora does not started OVS by default
+    elif is_fedora; then
+        install_package openvswitch
+        # Ensure that the service is started
+        restart_service openvswitch
+    elif is_suse; then
+        ### FIXME: Find out if package can be pushed to Factory
+        echo "OpenVSwitch packages can be installed from Cloud:OpenStack:Master in OBS"
         restart_service openvswitch
     fi
 }
diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid
new file mode 100644
index 0000000..b49aa92
--- /dev/null
+++ b/lib/quantum_plugins/plumgrid
@@ -0,0 +1,37 @@
+# PLUMgrid Quantum Plugin
+# Edgar Magana emagana@plumgrid.com
+# ------------------------------------
+
+# Save trace settings
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+#source $TOP_DIR/lib/quantum_plugins/ovs_base
+
+function quantum_plugin_create_nova_conf() {
+
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+}
+
+function quantum_plugin_setup_interface_driver() {
+    :
+}
+
+function quantum_plugin_configure_common() {
+    Q_PLUGIN_CONF_PATH=etc/quantum/plugins/plumgrid
+    Q_PLUGIN_CONF_FILENAME=plumgrid.ini
+    Q_DB_NAME="plumgrid_quantum"
+    Q_PLUGIN_CLASS="quantum.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.QuantumPluginPLUMgridV2"
+}
+
+function quantum_plugin_configure_service() {
+    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server localhost
+    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port 7766
+}
+
+function quantum_plugin_configure_debug_command() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index 2dfd4f7..d1d7382 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -17,7 +17,9 @@
     _quantum_ovs_base_install_agent_packages
 
     # quantum_ryu_agent requires ryu module
+    install_package $(get_packages "ryu")
     install_ryu
+    configure_ryu
 }
 
 function quantum_plugin_configure_common() {
diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu
index 7a01923..f1e9e7c 100644
--- a/lib/quantum_thirdparty/ryu
+++ b/lib/quantum_thirdparty/ryu
@@ -17,24 +17,15 @@
 RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
 # Ryu Applications
 RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
-# Ryu configuration
-RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"
---app_lists=$RYU_APPS
---wsapi_host=$RYU_API_HOST
---wsapi_port=$RYU_API_PORT
---ofp_listen_host=$RYU_OFP_HOST
---ofp_tcp_listen_port=$RYU_OFP_PORT
---quantum_url=http://$Q_HOST:$Q_PORT
---quantum_admin_username=$Q_ADMIN_USERNAME
---quantum_admin_password=$SERVICE_PASSWORD
---quantum_admin_tenant_name=$SERVICE_TENANT_NAME
---quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
---quantum_auth_strategy=$Q_AUTH_STRATEGY
---quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
-"}
 
+# configure_ryu can be called multiple times as quantum_pluing/ryu may call
+# this function for quantum-ryu-agent
+_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
 function configure_ryu() {
-    setup_develop $RYU_DIR
+    if [[ "$_RYU_CONFIGURED" == "False" ]]; then
+        setup_develop $RYU_DIR
+        _RYU_CONFIGURED=True
+    fi
 }
 
 function init_ryu() {
@@ -46,6 +37,21 @@
     RYU_CONF=$RYU_CONF_DIR/ryu.conf
     sudo rm -rf $RYU_CONF
 
+    # Ryu configuration
+    RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"[DEFAULT]
+app_lists=$RYU_APPS
+wsapi_host=$RYU_API_HOST
+wsapi_port=$RYU_API_PORT
+ofp_listen_host=$RYU_OFP_HOST
+ofp_tcp_listen_port=$RYU_OFP_PORT
+quantum_url=http://$Q_HOST:$Q_PORT
+quantum_admin_username=$Q_ADMIN_USERNAME
+quantum_admin_password=$SERVICE_PASSWORD
+quantum_admin_tenant_name=$SERVICE_TENANT_NAME
+quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
+quantum_auth_strategy=$Q_AUTH_STRATEGY
+quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
+"}
     echo "${RYU_CONF_CONTENTS}" > $RYU_CONF
 }
 
@@ -62,7 +68,7 @@
 }
 
 function start_ryu() {
-    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
 }
 
 function stop_ryu() {
diff --git a/lib/rpc_backend b/lib/rpc_backend
index f35f9db..bbd51f0 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -39,8 +39,39 @@
     fi
 
     if is_service_enabled qpid && ! qpid_is_supported; then
-        echo "Qpid support is not available for this version of your distribution."
-        exit 1
+        die $LINENO "Qpid support is not available for this version of your distribution."
+    fi
+}
+
+# clean up after rpc backend - eradicate all traces so changing backends
+# produces a clean switch
+function cleanup_rpc_backend {
+    if is_service_enabled rabbit; then
+        # Obliterate rabbitmq-server
+        uninstall_package rabbitmq-server
+        sudo killall epmd
+        if is_ubuntu; then
+            # And the Erlang runtime too
+            sudo aptitude purge -y ~nerlang
+        fi
+    elif is_service_enabled qpid; then
+        if is_fedora; then
+            uninstall_package qpid-cpp-server-daemon
+        elif is_ubuntu; then
+            uninstall_package qpidd
+        else
+            exit_distro_not_supported "qpid installation"
+        fi
+    elif is_service_enabled zeromq; then
+        if is_fedora; then
+            uninstall_package zeromq python-zmq
+        elif is_ubuntu; then
+            uninstall_package libzmq1 python-zmq
+        elif is_suse; then
+            uninstall_package libzmq1 python-pyzmq
+        else
+            exit_distro_not_supported "zeromq installation"
+        fi
     fi
 }
 
@@ -58,6 +89,8 @@
             install_package qpid-cpp-server-daemon
         elif is_ubuntu; then
             install_package qpidd
+            sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf
+            sudo chmod o+r /etc/qpid/qpidd.sasldb
         else
             exit_distro_not_supported "qpid installation"
         fi
@@ -100,6 +133,11 @@
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq
     elif is_service_enabled qpid; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
+        if is_ubuntu; then
+            QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1`
+            iniset $file $section qpid_password $QPID_PASSWORD
+            iniset $file $section qpid_username admin
+        fi
     elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
         iniset $file $section rabbit_host $RABBIT_HOST
diff --git a/lib/swift b/lib/swift
index 5ba7e56..2c87d21 100644
--- a/lib/swift
+++ b/lib/swift
@@ -35,9 +35,10 @@
 # Default is the common DevStack data directory.
 SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DATA_DIR}/swift}
 
-# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
+# Set ``SWIFT_CONF_DIR`` to the location of the configuration files.
 # Default is ``/etc/swift``.
-SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
+# TODO(dtroyer): remove SWIFT_CONFIG_DIR after cutting stable/grizzly
+SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-${SWIFT_CONFIG_DIR:-/etc/swift}}
 
 # DevStack will create a loop-back disk formatted as XFS to store the
 # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in
@@ -45,6 +46,10 @@
 # Default is 1 gigabyte.
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
 
+# Set ``SWIFT_EXTRAS_MIDDLEWARE`` to extras middlewares.
+# Default is ``staticweb, tempurl, bulk, formpost``
+SWIFT_EXTRAS_MIDDLEWARE=${SWIFT_EXTRAS_MIDDLEWARE:-tempurl formpost staticweb bulk}
+
 # The ring uses a configurable number of bits from a path’s MD5 hash as
 # a partition index that designates a device. The number of bits kept
 # from the hash is known as the partition power, and 2 to the partition
@@ -56,17 +61,18 @@
 SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
 
 # Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
-# configured for your Swift cluster.  By default the three replicas would need a
-# bit of IO and Memory on a VM you may want to lower that to 1 if you want to do
-# only some quick testing.
-SWIFT_REPLICAS=${SWIFT_REPLICAS:-3}
+# configured for your Swift cluster. By default we are configuring
+# only one replica since this is way less CPU and memory intensive. If
+# you are planning to test swift replication you may want to set this
+# up to 3.
+SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
 SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
 
 # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
 # Port bases used in port number calclution for the service "nodes"
 # The specified port number will be used, the additinal ports calculated by
 # base_port + node_num * 10
-OBJECT_PORT_BASE=6010
+OBJECT_PORT_BASE=6013
 CONTAINER_PORT_BASE=6011
 ACCOUNT_PORT_BASE=6012
 
@@ -76,18 +82,19 @@
 
 # cleanup_swift() - Remove residual data files
 function cleanup_swift() {
-   rm -f ${SWIFT_CONFIG_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
+   rm -f ${SWIFT_CONF_DIR}{*.builder,*.ring.gz,backups/*.builder,backups/*.ring.gz}
    if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
       sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
    fi
    if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
       rm ${SWIFT_DATA_DIR}/drives/images/swift.img
    fi
+   rm -rf ${SWIFT_DATA_DIR}/run/
 }
 
 # configure_swift() - Set config files, create data dirs and loop image
 function configure_swift() {
-    local swift_auth_server
+    local swift_pipeline=" "
     local node_number
     local swift_node_config
     local swift_log_dir
@@ -143,13 +150,13 @@
         sudo chown -R $USER: ${node}
     done
 
-   sudo mkdir -p ${SWIFT_CONFIG_DIR}/{object,container,account}-server
-   sudo chown -R $USER: ${SWIFT_CONFIG_DIR}
+   sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
+   sudo chown -R $USER: ${SWIFT_CONF_DIR}
 
-    if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
+    if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
         # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
         # Create a symlink if the config dir is moved
-        sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
+        sudo ln -sf ${SWIFT_CONF_DIR} /etc/swift
     fi
 
     # Swift use rsync to synchronize between all the different
@@ -180,14 +187,14 @@
         swift_auth_server=tempauth
     fi
 
-    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONFIG_DIR}/proxy-server.conf
+    SWIFT_CONFIG_PROXY_SERVER=${SWIFT_CONF_DIR}/proxy-server.conf
     cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR}
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT workers 1
@@ -198,10 +205,21 @@
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
 
-    # Only enable Swift3 if we have it enabled in ENABLED_SERVICES
-    is_service_enabled swift3 && swift3=swift3 || swift3=""
-
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} pipeline:main pipeline "catch_errors healthcheck cache ratelimit ${swift3} ${swift_auth_server} proxy-logging proxy-server"
+    # By default Swift is installed with the tempauth middleware,
+    # which has some default usernames and passwords. If Keystone is
+    # enabled, Swift will be configured to authenticate against it.
+    if is_service_enabled key;then
+        if is_service_enabled swift3;then
+            swift_pipeline=" s3token swift3 "
+        fi
+        swift_pipeline+=" authtoken keystoneauth "
+    else
+        if is_service_enabled swift3;then
+            swift_pipeline=" swift3 "
+        fi
+        swift_pipeline+=" tempauth "
+    fi
+    sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
 
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true
 
@@ -237,8 +255,8 @@
 EOF
     fi
 
-    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
-    iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
+    cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
+    iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
 
     # This function generates an object/account/proxy configuration
     # emulating 4 nodes on different ports
@@ -257,7 +275,7 @@
         iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
 
         iniuncomment ${swift_node_config} DEFAULT swift_dir
-        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONFIG_DIR}
+        iniset ${swift_node_config} DEFAULT swift_dir ${SWIFT_CONF_DIR}
 
         iniuncomment ${swift_node_config} DEFAULT devices
         iniset ${swift_node_config} DEFAULT devices ${node_path}
@@ -273,7 +291,7 @@
     }
 
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        swift_node_config=${SWIFT_CONFIG_DIR}/object-server/${node_number}.conf
+        swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config}
         generate_swift_config ${swift_node_config} ${node_number} $[OBJECT_PORT_BASE + 10 * (node_number - 1)]
         iniset ${swift_node_config} filter:recon recon_cache_path  ${SWIFT_DATA_DIR}/cache
@@ -281,14 +299,14 @@
         # modification and make sure it works for new sections.
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
 
-        swift_node_config=${SWIFT_CONFIG_DIR}/container-server/${node_number}.conf
+        swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config}
         generate_swift_config ${swift_node_config} ${node_number} $[CONTAINER_PORT_BASE + 10 * (node_number - 1)]
         iniuncomment ${swift_node_config} app:container-server allow_versions
         iniset ${swift_node_config} app:container-server allow_versions  "true"
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
 
-        swift_node_config=${SWIFT_CONFIG_DIR}/account-server/${node_number}.conf
+        swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
         generate_swift_config ${swift_node_config} ${node_number} $[ACCOUNT_PORT_BASE + 10 * (node_number - 1)]
         sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = ${SWIFT_DATA_DIR}/cache," ${swift_node_config}
@@ -315,7 +333,7 @@
 
     # This is where we create three different rings for swift with
     # different object servers binding on different ports.
-    pushd ${SWIFT_CONFIG_DIR} >/dev/null && {
+    pushd ${SWIFT_CONF_DIR} >/dev/null && {
 
         rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
 
@@ -352,6 +370,9 @@
 function start_swift() {
     # (re)start rsyslog
     restart_service rsyslog
+    # (re)start memcached to make sure we have a clean memcache.
+    restart_service memcached
+
     # Start rsync
     if is_ubuntu; then
         sudo /etc/init.d/rsync restart || :
@@ -359,19 +380,36 @@
         sudo systemctl start xinetd.service
     fi
 
-   # First spawn all the swift services then kill the
-   # proxy service so we can run it in foreground in screen.
-   # ``swift-init ... {stop|restart}`` exits with '1' if no servers are running,
-   # ignore it just in case
+   # By default, with only one replica, we launch the proxy,
+   # container, account and object servers in the foreground in screen
+   # and the other services in the background. If SWIFT_REPLICAS is set
+   # to something greater than one, we first spawn all the swift services
+   # and then kill the proxy service so we can run it in the foreground
+   # in screen. ``swift-init ... {stop|restart}`` exits with '1' if no
+   # servers are running; ignore it just in case.
    swift-init --run-dir=${SWIFT_DATA_DIR}/run all restart || true
-   swift-init --run-dir=${SWIFT_DATA_DIR}/run proxy stop || true
-   screen_it swift "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONFIG_DIR}/proxy-server.conf -v"
+   if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+        todo="object container account"
+   fi
+   for type in proxy ${todo}; do
+       swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
+   done
+   screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
+   if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+       for type in object container account;do
+           screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
+       done
+   fi
 }
 
 # stop_swift() - Stop running processes (non-screen)
 function stop_swift() {
     # screen normally killed by unstack.sh
-    swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+    if type -p swift-init >/dev/null; then
+        swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
+    fi
+    # Dump the proxy server
+    sudo pkill -f swift-proxy-server
 }
 
 # Restore xtrace
diff --git a/lib/tempest b/lib/tempest
index d17b32d..85e643e 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -238,6 +238,9 @@
     iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
 
     # network
+    if is_service_enabled quantum; then
+        iniset $TEMPEST_CONF network quantum_available "True"
+    fi
     iniset $TEMPEST_CONF network api_version 2.0
     iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable"
     iniset $TEMPEST_CONF network public_network_id "$public_network_id"
@@ -263,10 +266,6 @@
 # install_tempest() - Collect source and prepare
 function install_tempest() {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-
-    # Tempest doesn't satisfy its dependencies on its own, so
-    # install them here instead.
-    pip_install -r $TEMPEST_DIR/tools/pip-requires
 }
 
 # init_tempest() - Initialize ec2 images
diff --git a/stack.sh b/stack.sh
index 3fab488..936d587 100755
--- a/stack.sh
+++ b/stack.sh
@@ -55,8 +55,7 @@
 # allow you to safely override those settings.
 
 if [[ ! -r $TOP_DIR/stackrc ]]; then
-    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
-    exit 1
+    log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
 fi
 source $TOP_DIR/stackrc
 
@@ -93,8 +92,7 @@
 if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
-        echo "If you wish to run this script anyway run with FORCE=yes"
-        exit 1
+        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
     fi
 fi
 
@@ -105,16 +103,14 @@
 # ``stack.sh`` keeps function libraries here
 # Make sure ``$TOP_DIR/lib`` directory is present
 if [ ! -d $TOP_DIR/lib ]; then
-    echo "ERROR: missing devstack/lib"
-    exit 1
+    log_error $LINENO "missing devstack/lib"
 fi
 
 # ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
 # templates and other useful files in the ``files`` subdirectory
 FILES=$TOP_DIR/files
 if [ ! -d $FILES ]; then
-    echo "ERROR: missing devstack/files"
-    exit 1
+    log_error $LINENO "missing devstack/files"
 fi
 
 SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -197,9 +193,7 @@
 
 # Create the destination directory and ensure it is writable by the user
 sudo mkdir -p $DEST
-if [ ! -w $DEST ]; then
-    sudo chown $STACK_USER $DEST
-fi
+sudo chown -R $STACK_USER $DEST
 
 # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
 # Internet access. ``stack.sh`` must have been previously run with Internet
@@ -214,7 +208,7 @@
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 sudo mkdir -p $DATA_DIR
-sudo chown $STACK_USER $DATA_DIR
+sudo chown -R $STACK_USER $DATA_DIR
 
 
 # Common Configuration
@@ -229,29 +223,9 @@
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
 
-# Find the interface used for the default route
-HOST_IP_IFACE=${HOST_IP_IFACE:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)}
-# Search for an IP unless an explicit is set by ``HOST_IP`` environment variable
-if [ -z "$HOST_IP" -o "$HOST_IP" == "dhcp" ]; then
-    HOST_IP=""
-    HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
-    for IP in $HOST_IPS; do
-        # Attempt to filter out IP addresses that are part of the fixed and
-        # floating range. Note that this method only works if the ``netaddr``
-        # python library is installed. If it is not installed, an error
-        # will be printed and the first IP from the interface will be used.
-        # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
-        # address.
-        if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then
-            HOST_IP=$IP
-            break;
-        fi
-    done
-    if [ "$HOST_IP" == "" ]; then
-        echo "Could not determine host ip address."
-        echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
-        exit 1
-    fi
+HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP")
+if [ "$HOST_IP" == "" ]; then
+    die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
 fi
 
 # Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints.
@@ -433,7 +407,7 @@
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 fi
 
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
     # If we are using swift3, we can default the s3 port to swift instead
     # of nova-objectstore
     if is_service_enabled swift3;then
@@ -532,9 +506,9 @@
     # as the template to search for, appending '.*' to match the date
     # we added on earlier runs.
     LOGDIR=$(dirname "$LOGFILE")
-    LOGNAME=$(basename "$LOGFILE")
+    LOGFILENAME=$(basename "$LOGFILE")
     mkdir -p $LOGDIR
-    find $LOGDIR -maxdepth 1 -name $LOGNAME.\* -mtime +$LOGDAYS -exec rm {} \;
+    find $LOGDIR -maxdepth 1 -name $LOGFILENAME.\* -mtime +$LOGDAYS -exec rm {} \;
     LOGFILE=$LOGFILE.${CURRENT_LOG_TIME}
     SUMFILE=$LOGFILE.${CURRENT_LOG_TIME}.summary
 
@@ -564,8 +538,8 @@
 
     echo_summary "stack.sh log $LOGFILE"
     # Specified logfile name always links to the most recent log
-    ln -sf $LOGFILE $LOGDIR/$LOGNAME
-    ln -sf $SUMFILE $LOGDIR/$LOGNAME.summary
+    ln -sf $LOGFILE $LOGDIR/$LOGFILENAME
+    ln -sf $SUMFILE $LOGDIR/$LOGFILENAME.summary
 else
     # Set up output redirection without log files
     # Copy stdout to fd 3
@@ -670,12 +644,12 @@
 git_clone $OPENSTACKCLIENT_REPO $OPENSTACKCLIENT_DIR $OPENSTACKCLIENT_BRANCH
 
 # glance, swift middleware and nova api needs keystone middleware
-if is_service_enabled key g-api n-api swift; then
+if is_service_enabled key g-api n-api s-proxy; then
     # unified auth system (manages accounts/tokens)
     install_keystone
 fi
 
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
     install_swiftclient
     install_swift
     if is_service_enabled swift3; then
@@ -732,10 +706,10 @@
 configure_keystoneclient
 configure_novaclient
 setup_develop $OPENSTACKCLIENT_DIR
-if is_service_enabled key g-api n-api swift; then
+if is_service_enabled key g-api n-api s-proxy; then
     configure_keystone
 fi
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
     configure_swift
     configure_swiftclient
     if is_service_enabled swift3; then
@@ -751,6 +725,8 @@
 configure_glanceclient
 
 if is_service_enabled nova; then
+    # First clean up old instances
+    cleanup_nova
     configure_nova
 fi
 if is_service_enabled horizon; then
@@ -922,7 +898,7 @@
     init_glance
 
     # Store the images in swift if enabled.
-    if is_service_enabled swift; then
+    if is_service_enabled s-proxy; then
         iniset $GLANCE_API_CONF DEFAULT default_store swift
         iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/
         iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance
@@ -981,7 +957,7 @@
 # Storage Service
 # ---------------
 
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
     echo_summary "Configuring Swift"
     init_swift
 fi
@@ -1073,7 +1049,7 @@
 
     elif [ "$VIRT_DRIVER" = 'openvz' ]; then
         echo_summary "Using OpenVZ virtualization driver"
-        iniset $NOVA_CONF DEFAULT compute_driver "openvz.driver.OpenVzDriver"
+        iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
         iniset $NOVA_CONF DEFAULT connection_type "openvz"
         LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
         iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
@@ -1087,9 +1063,8 @@
         iniset $NOVA_CONF DEFAULT compute_driver nova.virt.baremetal.driver.BareMetalDriver
         iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
         iniset $NOVA_CONF DEFAULT scheduler_host_manager nova.scheduler.baremetal_host_manager.BaremetalHostManager
-        # NOTE(deva): ComputeCapabilitiesFilter does not currently work with Baremetal. See bug # 1129485
-        #             As a work around, we disable CCFilter by explicitly enabling all the other default filters.
-        iniset $NOVA_CONF DEFAULT scheduler_default_filters ComputeFilter,RetryFilter,AvailabilityZoneFilter,ImagePropertiesFilter
+        iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
+        iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
         iniset $NOVA_CONF baremetal instance_type_extra_specs cpu_arch:$BM_CPU_ARCH
         iniset $NOVA_CONF baremetal driver $BM_DRIVER
         iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER
@@ -1128,7 +1103,7 @@
 # Only run the services specified in ``ENABLED_SERVICES``
 
 # Launch Swift Services
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
     echo_summary "Starting Swift"
     start_swift
 fi
diff --git a/stackrc b/stackrc
index 5b473c4..7d3cfe0 100644
--- a/stackrc
+++ b/stackrc
@@ -45,15 +45,15 @@
 CEILOMETER_BRANCH=master
 
 # ceilometer client library
-CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient
+CEILOMETERCLIENT_REPO=${GIT_BASE}/openstack/python-ceilometerclient.git
 CEILOMETERCLIENT_BRANCH=master
 
 # volume service
-CINDER_REPO=${GIT_BASE}/openstack/cinder
+CINDER_REPO=${GIT_BASE}/openstack/cinder.git
 CINDER_BRANCH=master
 
 # volume client
-CINDERCLIENT_REPO=${GIT_BASE}/openstack/python-cinderclient
+CINDERCLIENT_REPO=${GIT_BASE}/openstack/python-cinderclient.git
 CINDERCLIENT_BRANCH=master
 
 # compute service
@@ -63,11 +63,11 @@
 # storage service
 SWIFT_REPO=${GIT_BASE}/openstack/swift.git
 SWIFT_BRANCH=master
-SWIFT3_REPO=https://github.com/fujita/swift3.git
+SWIFT3_REPO=${GIT_BASE}/fujita/swift3.git
 SWIFT3_BRANCH=master
 
 # python swift client library
-SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient
+SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient.git
 SWIFTCLIENT_BRANCH=master
 
 # image catalog service
@@ -75,7 +75,7 @@
 GLANCE_BRANCH=master
 
 # python glance client library
-GLANCECLIENT_REPO=${GIT_BASE}/openstack/python-glanceclient
+GLANCECLIENT_REPO=${GIT_BASE}/openstack/python-glanceclient.git
 GLANCECLIENT_BRANCH=master
 
 # unified auth system (manages accounts/tokens)
@@ -83,7 +83,7 @@
 KEYSTONE_BRANCH=master
 
 # a websockets/html5 or flash powered VNC console for vm instances
-NOVNC_REPO=https://github.com/kanaka/noVNC.git
+NOVNC_REPO=${GIT_BASE}/kanaka/noVNC.git
 NOVNC_BRANCH=master
 
 # a websockets/html5 or flash powered SPICE console for vm instances
@@ -103,15 +103,15 @@
 OPENSTACKCLIENT_BRANCH=master
 
 # python keystone client library to nova that horizon uses
-KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient
+KEYSTONECLIENT_REPO=${GIT_BASE}/openstack/python-keystoneclient.git
 KEYSTONECLIENT_BRANCH=master
 
 # quantum service
-QUANTUM_REPO=${GIT_BASE}/openstack/quantum
+QUANTUM_REPO=${GIT_BASE}/openstack/quantum.git
 QUANTUM_BRANCH=master
 
 # quantum client
-QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient
+QUANTUMCLIENT_REPO=${GIT_BASE}/openstack/python-quantumclient.git
 QUANTUMCLIENT_BRANCH=master
 
 # Tempest test suite
@@ -127,17 +127,17 @@
 HEATCLIENT_BRANCH=master
 
 # ryu service
-RYU_REPO=https://github.com/osrg/ryu.git
+RYU_REPO=${GIT_BASE}/osrg/ryu.git
 RYU_BRANCH=master
 
 # diskimage-builder
-BM_IMAGE_BUILD_REPO=https://github.com/stackforge/diskimage-builder.git
+BM_IMAGE_BUILD_REPO=${GIT_BASE}/stackforge/diskimage-builder.git
 BM_IMAGE_BUILD_BRANCH=master
 
 # bm_poseur
 # Used to simulate a hardware environment for baremetal
 # Only used if BM_USE_FAKE_ENV is set
-BM_POSEUR_REPO=https://github.com/tripleo/bm_poseur.git
+BM_POSEUR_REPO=${GIT_BASE}/tripleo/bm_poseur.git
 BM_POSEUR_BRANCH=master
 
 
diff --git a/tests/functions.sh b/tests/functions.sh
index 4fe6443..27a6cfe 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -60,6 +60,10 @@
 
 [ddd]
 empty =
+
+[eee]
+multi = foo1
+multi = foo2
 EOF
 
 # Test with spaces
@@ -193,6 +197,34 @@
     echo "inicomment failed: $VAL"
 fi
 
+# Test multiple line iniset/iniget
+iniset_multiline test.ini eee multi bar1 bar2
+
+VAL=$(iniget_multiline test.ini eee multi)
+if [[ "$VAL" == "bar1 bar2" ]]; then
+    echo "OK: iniset_multiline"
+else
+    echo "iniset_multiline failed: $VAL"
+fi
+
+# Test iniadd with existing values
+iniadd test.ini eee multi bar3
+VAL=$(iniget_multiline test.ini eee multi)
+if [[ "$VAL" == "bar1 bar2 bar3" ]]; then
+    echo "OK: iniadd"
+else
+    echo "iniadd failed: $VAL"
+fi
+
+# Test iniadd with non-existing values
+iniadd test.ini eee non-multi foobar1 foobar2
+VAL=$(iniget_multiline test.ini eee non-multi)
+if [[ "$VAL" == "foobar1 foobar2" ]]; then
+    echo "OK: iniadd with non-exiting value"
+else
+    echo "iniadd with non-exsting failed: $VAL"
+fi
+
 rm test.ini
 
 # Enabling/disabling services
diff --git a/tools/info.sh b/tools/info.sh
index ef1f338..14ab8f6 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -88,17 +88,7 @@
 # - We are going to check packages only for the services needed.
 # - We are parsing the packages files and detecting metadatas.
 
-if is_ubuntu; then
-    PKG_DIR=$FILES/apts
-elif is_fedora; then
-    PKG_DIR=$FILES/rpms
-elif is_suse; then
-    PKG_DIR=$FILES/rpms-suse
-else
-    exit_distro_not_supported "list of packages"
-fi
-
-for p in $(get_packages $PKG_DIR); do
+for p in $(get_packages $ENABLED_SERVICES); do
     if [[ "$os_PACKAGE" = "deb" ]]; then
         ver=$(dpkg -s $p 2>/dev/null | grep '^Version: ' | cut -d' ' -f2)
     elif [[ "$os_PACKAGE" = "rpm" ]]; then
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index 4d151db..7c4386f 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -54,15 +54,7 @@
 # ================
 
 # Install package requirements
-if is_ubuntu; then
-    install_package $(get_packages $FILES/apts)
-elif is_fedora; then
-    install_package $(get_packages $FILES/rpms)
-elif is_suse; then
-    install_package $(get_packages $FILES/rpms-suse)
-else
-    exit_distro_not_supported "list of packages"
-fi
+install_package $(get_packages $ENABLED_SERVICES)
 
 if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
     if is_ubuntu || is_fedora; then
diff --git a/tools/xen/devstackubuntupreseed.cfg b/tools/xen/devstackubuntupreseed.cfg
index d8caaee..c559b1e 100644
--- a/tools/xen/devstackubuntupreseed.cfg
+++ b/tools/xen/devstackubuntupreseed.cfg
@@ -257,7 +257,7 @@
 
 # The kernel image (meta) package to be installed; "none" can be used if no
 # kernel is to be installed.
-#d-i base-installer/kernel/image string linux-generic
+d-i base-installer/kernel/image string linux-virtual
 
 ### Account setup
 # Skip creation of a root account (normal user account will be able to
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index f0e057b..0c0e1e2 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -20,13 +20,13 @@
 fi
 
 # This directory
-TOP_DIR=$(cd $(dirname "$0") && pwd)
+THIS_DIR=$(cd $(dirname "$0") && pwd)
 
 # Source lower level functions
-. $TOP_DIR/../../functions
+. $THIS_DIR/../../functions
 
 # Include onexit commands
-. $TOP_DIR/scripts/on_exit.sh
+. $THIS_DIR/scripts/on_exit.sh
 
 
 #
@@ -49,7 +49,7 @@
 # including installing XenAPI plugins
 #
 
-cd $TOP_DIR
+cd $THIS_DIR
 if [ -f ./master ]
 then
     rm -rf ./master
@@ -271,7 +271,7 @@
             HTTP_SERVER_LOCATION="/var/www/html"
             mkdir -p $HTTP_SERVER_LOCATION
         fi
-        cp -f $TOP_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
+        cp -f $THIS_DIR/devstackubuntupreseed.cfg $HTTP_SERVER_LOCATION
         MIRROR=${MIRROR:-""}
         if [ -n "$MIRROR" ]; then
             sed -e "s,d-i mirror/http/hostname string .*,d-i mirror/http/hostname string $MIRROR," \
@@ -280,11 +280,11 @@
     fi
 
     # Update the template
-    $TOP_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL
+    $THIS_DIR/scripts/install_ubuntu_template.sh $PRESEED_URL
 
     # create a new VM with the given template
     # creating the correct VIFs and metadata
-    $TOP_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
+    $THIS_DIR/scripts/install-os-vpx.sh -t "$UBUNTU_INST_TEMPLATE_NAME" -v $VM_BR -m $MGT_BR -p $PUB_BR -l $GUEST_NAME -r $OSDOMU_MEM_MB -k "flat_network_bridge=${VM_BR}"
 
     # wait for install to finish
     wait_for_VM_to_halt
@@ -298,7 +298,7 @@
     #
 
     # Install XenServer tools, and other such things
-    $TOP_DIR/prepare_guest_template.sh "$GUEST_NAME"
+    $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME"
 
     # start the VM to run the prepare steps
     xe vm-start vm="$GUEST_NAME"
@@ -320,7 +320,7 @@
 #
 # Inject DevStack inside VM disk
 #
-$TOP_DIR/build_xva.sh "$GUEST_NAME"
+$THIS_DIR/build_xva.sh "$GUEST_NAME"
 
 # create a snapshot before the first boot
 # to allow a quick re-run with the same settings
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 1a5a2a9..e4d8ac9 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -32,10 +32,10 @@
 PUB_IP=${PUB_IP:-192.168.1.55}
 
 # Public network
-PUB_BR=${PUB_BR:-"xenbr0"}
-PUB_DEV=${PUB_DEV:-eth0}
-PUB_VLAN=${PUB_VLAN:--1}
 PUB_NETMASK=${PUB_NETMASK:-255.255.255.0}
+PUB_BR=${PUB_BR:-"xenbr0"}
+PUB_VLAN=${PUB_VLAN:--1}
+PUB_DEV=${PUB_DEV:-eth0}
 
 # VM network params
 VM_NETMASK=${VM_NETMASK:-255.255.255.0}
diff --git a/unstack.sh b/unstack.sh
index a086d5c..3ac2985 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -63,7 +63,7 @@
 fi
 
 # Swift runs daemons
-if is_service_enabled swift; then
+if is_service_enabled s-proxy; then
     stop_swift
     cleanup_swift
 fi