Merge "chown -R to ensure that stack user owns data_dir"
diff --git a/AUTHORS b/AUTHORS
index 35c0a52..718a760 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -13,6 +13,7 @@
 Devin Carlen <devin.carlen@gmail.com>
 Doug hellmann <doug.hellmann@dreamhost.com>
 Eddie Hebert <edhebert@gmail.com>
+Edgar Magana <emagana@gmail.com>
 Eoghan Glynn <eglynn@redhat.com>
 Eric Windisch <ewindisch@cloudscaling.com>
 Gabriel Hurley <gabriel@strikeawe.com>
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index a92c0d9..3c83725 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -56,7 +56,6 @@
     if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
         echo "SUCCESS $aggregate_name not present"
     else
-        echo "ERROR found aggregate: $aggregate_name"
+        die $LINENO "found aggregate: $aggregate_name"
-        exit -1
     fi
 }
@@ -67,15 +67,14 @@
 AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
 
 # check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
 
 
 # Ensure creating a duplicate fails
 # =================================
 
 if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
-    echo "ERROR could create duplicate aggregate"
-    exit -1
+    die $LINENO "could create duplicate aggregate"
 fi
 
 
@@ -113,7 +112,7 @@
 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
 nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
 
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "metadata was not cleared"
 
 nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
 nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
@@ -129,8 +128,7 @@
 nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
 nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
 if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
-    echo "ERROR could add duplicate host to single aggregate"
-    exit -1
+    die $LINENO "could add duplicate host to single aggregate"
 fi
 nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
 nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 679091b..14d0049 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -72,7 +72,7 @@
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -140,7 +140,7 @@
 # Create the bootable volume
 start_time=$(date +%s)
 cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die "Failure creating volume $VOL_NAME"
+    die $LINENO "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
     echo "Volume $VOL_NAME not created"
     exit 1
@@ -150,7 +150,7 @@
 
 # Get volume ID
 VOL_ID=$(cinder list | grep $VOL_NAME  | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Boot instance
 # -------------
@@ -159,7 +159,7 @@
 # <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
 # Leaving the middle two fields blank appears to do-the-right-thing
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
@@ -169,7 +169,7 @@
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -178,7 +178,7 @@
 # --------
 
 # Delete volume backed instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
     echo "Server $VM_NAME not deleted"
     exit 1
@@ -192,7 +192,7 @@
 
 # Delete volume
 start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
     echo "Volume $VOL_NAME not deleted"
     exit 1
@@ -201,7 +201,7 @@
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
 # Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index 12f2732..dce36aa 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -49,21 +49,20 @@
 BUCKET=testbucket
 IMAGE=bundle.img
 truncate -s 5M /tmp/$IMAGE
-euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
+euca-bundle-image -i /tmp/$IMAGE || die $LINENO "Failure bundling image $IMAGE"
 
-euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
+euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die $LINENO "Failure uploading bundle $IMAGE to $BUCKET"
 
 AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
-die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
+die_if_not_set $LINENO AMI "Failure registering $BUCKET/$IMAGE"
 
 # Wait for the image to become available
 if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then
-    echo "Image $AMI not available within $REGISTER_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Image $AMI not available within $REGISTER_TIMEOUT seconds"
 fi
 
 # Clean up
-euca-deregister $AMI || die "Failure deregistering $AMI"
+euca-deregister $AMI || die $LINENO "Failure deregistering $AMI"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 8b15da8..50d4744 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -56,68 +56,62 @@
 
 # Find a machine image to boot
 IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Add a secgroup
 if ! euca-describe-groups | grep -q $SECGROUP; then
     euca-add-group -d "$SECGROUP description" $SECGROUP
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+        die $LINENO "Security group not created"
     fi
 fi
 
 # Launch it
 INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
-die_if_not_set INSTANCE "Failure launching instance"
+die_if_not_set $LINENO INSTANCE "Failure launching instance"
 
 # Assure it has booted within a reasonable time
 if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
-    echo "server didn't become active within $RUNNING_TIMEOUT seconds"
-    exit 1
+    die $LINENO "server didn't become active within $RUNNING_TIMEOUT seconds"
 fi
 
 # Volumes
 # -------
 if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
    VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
-   die_if_not_set VOLUME_ZONE "Failure to find zone for volume"
+   die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
    VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2`
-   die_if_not_set VOLUME "Failure to create volume"
+   die_if_not_set $LINENO VOLUME "Failure to create volume"
 
    # Test that volume has been created
    VOLUME=`euca-describe-volumes | cut -f2`
-   die_if_not_set VOLUME "Failure to get volume"
+   die_if_not_set $LINENO VOLUME "Failure to get volume"
 
    # Test volume has become available
    if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-       echo "volume didnt become available within $RUNNING_TIMEOUT seconds"
-       exit 1
+       die $LINENO "volume didnt become available within $RUNNING_TIMEOUT seconds"
    fi
 
    # Attach volume to an instance
    euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \
-       die "Failure attaching volume $VOLUME to $INSTANCE"
+       die $LINENO "Failure attaching volume $VOLUME to $INSTANCE"
    if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then
-       echo "Could not attach $VOLUME to $INSTANCE"
-       exit 1
+       die $LINENO "Could not attach $VOLUME to $INSTANCE"
    fi
 
    # Detach volume from an instance
    euca-detach-volume $VOLUME || \
-       die "Failure detaching volume $VOLUME to $INSTANCE"
+       die $LINENO "Failure detaching volume $VOLUME to $INSTANCE"
     if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
-        echo "Could not detach $VOLUME to $INSTANCE"
-        exit 1
+        die $LINENO "Could not detach $VOLUME to $INSTANCE"
     fi
 
     # Remove volume
     euca-delete-volume $VOLUME || \
-        die "Failure to delete volume"
+        die $LINENO "Failure to delete volume"
     if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then
-       echo "Could not delete $VOLUME"
-       exit 1
+       die $LINENO "Could not delete $VOLUME"
     fi
 else
     echo "Volume Tests Skipped"
@@ -125,58 +119,55 @@
 
 # Allocate floating address
 FLOATING_IP=`euca-allocate-address | cut -f2`
-die_if_not_set FLOATING_IP "Failure allocating floating IP"
+die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
 
 # Associate floating address
 euca-associate-address -i $INSTANCE $FLOATING_IP || \
-    die "Failure associating address $FLOATING_IP to $INSTANCE"
+    die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
 
 # Authorize pinging
 euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die "Failure authorizing rule in $SECGROUP"
+    die $LINENO "Failure authorizing rule in $SECGROUP"
 
 # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
 
 # Revoke pinging
 euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die "Failure revoking rule in $SECGROUP"
+    die $LINENO "Failure revoking rule in $SECGROUP"
 
 # Release floating address
 euca-disassociate-address $FLOATING_IP || \
-    die "Failure disassociating address $FLOATING_IP"
+    die $LINENO "Failure disassociating address $FLOATING_IP"
 
 # Wait just a tick for everything above to complete so release doesn't fail
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
 fi
 
 # Release floating address
 euca-release-address $FLOATING_IP || \
-    die "Failure releasing address $FLOATING_IP"
+    die $LINENO "Failure releasing address $FLOATING_IP"
 
 # Wait just a tick for everything above to complete so terminate doesn't fail
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
 fi
 
 # Terminate instance
 euca-terminate-instances $INSTANCE || \
-    die "Failure terminating instance $INSTANCE"
+    die $LINENO "Failure terminating instance $INSTANCE"
 
 # Assure it has terminated within a reasonable time. The behaviour of this
 # case changed with bug/836978. Requesting the status of an invalid instance
 # will now return an error message including the instance id, so we need to
 # filter that out.
 if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then
-    echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
-    exit 1
+    die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
 fi
 
 # Delete secgroup
-euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
+euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 34ab69d..b4e1c42 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -71,7 +71,7 @@
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -83,8 +83,7 @@
 if ! nova secgroup-list | grep -q $SECGROUP; then
     nova secgroup-create $SECGROUP "$SECGROUP description"
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+        die $LINENO "Security group not created"
     fi
 fi
 
@@ -115,7 +114,6 @@
 # Clean-up from previous runs
 nova delete $VM_NAME || true
 if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
+    die $LINENO "server didn't terminate!"
     exit 1
 fi
 
@@ -123,17 +122,16 @@
 # -------------
 
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
+    die $LINENO "server didn't become active!"
 fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -143,17 +141,16 @@
 
 # Allocate a floating IP from the default pool
 FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
+die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
 
 # List floating addresses
 if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
-    echo "Floating IP not allocated"
-    exit 1
+    die $LINENO "Floating IP not allocated"
 fi
 
 # Add floating IP to our server
 nova add-floating-ip $VM_UUID $FLOATING_IP || \
-    die "Failure adding floating IP $FLOATING_IP to $VM_NAME"
+    die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
 
 # Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
 ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
@@ -161,18 +158,17 @@
 if ! is_service_enabled quantum; then
     # Allocate an IP from second floating pool
     TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
-    die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
+    die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
 
     # list floating addresses
     if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
-        echo "Floating IP not allocated"
-        exit 1
+        die $LINENO "Floating IP not allocated"
      fi
 fi
 
 # Dis-allow icmp traffic (ping)
 nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
-    die "Failure deleting security group rule from $SECGROUP"
+    die $LINENO "Failure deleting security group rule from $SECGROUP"
 
 # FIXME (anthony): make xs support security groups
 if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
@@ -186,24 +182,23 @@
 if ! is_service_enabled quantum; then
     # Delete second floating IP
     nova floating-ip-delete $TEST_FLOATING_IP || \
-        die "Failure deleting floating IP $TEST_FLOATING_IP"
+        die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
 fi
 
 # Delete the floating ip
 nova floating-ip-delete $FLOATING_IP || \
-    die "Failure deleting floating IP $FLOATING_IP"
+    die $LINENO "Failure deleting floating IP $FLOATING_IP"
 
 # Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 # Wait for termination
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
+    die $LINENO "Server $VM_NAME not deleted"
 fi
 
 # Delete secgroup
 nova secgroup-delete $SECGROUP || \
-    die "Failure deleting security group $SECGROUP"
+    die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
index c5dae3a..5d778c9 100755
--- a/exercises/horizon.sh
+++ b/exercises/horizon.sh
@@ -36,7 +36,7 @@
 is_service_enabled horizon || exit 55
 
 # can we get the front page
-curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die "Horizon front page not functioning!"
+curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index bc33fe8..5c4b16e 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -264,7 +264,7 @@
         --image $(get_image_id) \
         $NIC \
         $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
-    die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID
+    die_if_not_set $LINENO VM_UUID "Failure launching $TENANT-server$NUM"
     confirm_server_active $VM_UUID
 }
 
@@ -309,8 +309,7 @@
 function shutdown_vms {
     foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%'
     if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
-        echo "Some VMs failed to shutdown"
-        false
+        die $LINENO "Some VMs failed to shutdown"
     fi
 }
 
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index a33c9c6..b73afdf 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -68,7 +68,7 @@
 
 # Delete secgroup
 nova secgroup-delete $SEC_GROUP_NAME || \
-    die "Failure deleting security group $SEC_GROUP_NAME"
+    die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index a75f955..46ac2c5 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -45,20 +45,20 @@
 # =============
 
 # Check if we have to swift via keystone
-swift stat || die "Failure geting status"
+swift stat || die $LINENO "Failure getting status"
 
 # We start by creating a test container
-swift post $CONTAINER || die "Failure creating container $CONTAINER"
+swift post $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
 
 # add some files into it.
-swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER"
+swift upload $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER"
 
 # list them
-swift list $CONTAINER || die "Failure listing contents of container $CONTAINER"
+swift list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
 
 # And we may want to delete them now that we have tested that
 # everything works.
-swift delete $CONTAINER || die "Failure deleting container $CONTAINER"
+swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 45cb0c8..7913641 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -70,7 +70,7 @@
 
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
 # Security Groups
 # ---------------
@@ -114,25 +114,23 @@
 # Clean-up from previous runs
 nova delete $VM_NAME || true
 if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
-    exit 1
+    die $LINENO "server didn't terminate!"
 fi
 
 # Boot instance
 # -------------
 
 VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
 
 # Check that the status is active within ACTIVE_TIMEOUT seconds
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't become active!"
-    exit 1
+    die $LINENO "server didn't become active!"
 fi
 
 # Get the instance IP
 IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
 ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -142,42 +140,38 @@
 
 # Verify it doesn't exist
 if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
-    echo "Volume $VOL_NAME already exists"
-    exit 1
+    die $LINENO "Volume $VOL_NAME already exists"
 fi
 
 # Create a new volume
 start_time=$(date +%s)
 cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
-    die "Failure creating volume $VOL_NAME"
+    die $LINENO "Failure creating volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not created"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not created"
 fi
 end_time=$(date +%s)
 echo "Completed cinder create in $((end_time - start_time)) seconds"
 
 # Get volume ID
 VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
 
 # Attach to server
 DEVICE=/dev/vdb
 start_time=$(date +%s)
 nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
-    die "Failure attaching volume $VOL_NAME to $VM_NAME"
+    die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not attached to $VM_NAME"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
 fi
 end_time=$(date +%s)
 echo "Completed volume-attach in $((end_time - start_time)) seconds"
 
 VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
+die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
 if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
-    echo "Volume not attached to correct instance"
-    exit 1
+    die $LINENO "Volume not attached to correct instance"
 fi
 
 # Clean up
@@ -185,33 +179,30 @@
 
 # Detach volume
 start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME"
+nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not detached from $VM_NAME"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
 fi
 end_time=$(date +%s)
 echo "Completed volume-detach in $((end_time - start_time)) seconds"
 
 # Delete volume
 start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
 if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
-    echo "Volume $VOL_NAME not deleted"
-    exit 1
+    die $LINENO "Volume $VOL_NAME not deleted"
 fi
 end_time=$(date +%s)
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
 # Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
 if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    echo "Server $VM_NAME not deleted"
-    exit 1
+    die $LINENO "Server $VM_NAME not deleted"
 fi
 
 # Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/functions b/functions
index 11f7d50..8cb703c 100644
--- a/functions
+++ b/functions
@@ -57,8 +57,15 @@
 # die "message"
 function die() {
     local exitcode=$?
+    if [ $exitcode == 0 ]; then
+        exitcode=1
+    fi
     set +o xtrace
-    echo $@
+    local msg="[ERROR] $0:$1 $2"
+    echo "$msg" 1>&2
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        echo "$msg" >> "${SCREEN_LOGDIR}/error.log"
+    fi
     exit $exitcode
 }
 
@@ -71,10 +78,9 @@
     (
         local exitcode=$?
         set +o xtrace
-        local evar=$1; shift
+        local line=$1; local evar=$2; shift 2
         if ! is_set $evar || [ $exitcode != 0 ]; then
-            echo $@
-            exit -1
+            die $line "$@"
         fi
     )
 }
@@ -418,12 +424,10 @@
     fi
 
     if [ $# -gt 0 ]; then
-        echo "Support for $DISTRO is incomplete: no support for $@"
+        die $LINENO "Support for $DISTRO is incomplete: no support for $@"
     else
-        echo "Support for $DISTRO is incomplete."
+        die $LINENO "Support for $DISTRO is incomplete."
     fi
-
-    exit 1
 }
 
 
@@ -1027,6 +1031,7 @@
         # No backends registered means this is likely called from ``localrc``
         # This is now deprecated usage
         DATABASE_TYPE=$1
+        DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database in localrc is deprecated\n"
     else
         # This should no longer get called...here for posterity
         use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
@@ -1099,9 +1104,8 @@
     fi
     if ! timeout $boot_timeout sh -c "$check_command"; then
         if [[ "$expected" = "True" ]]; then
-            echo "[Fail] Couldn't ping server"
+            die $LINENO "[Fail] Couldn't ping server"
         else
-            echo "[Fail] Could ping server"
+            die $LINENO "[Fail] Could ping server"
         fi
-        exit 1
     fi
@@ -1125,8 +1130,7 @@
     local ACTIVE_TIMEOUT=$5
     local probe_cmd=""
     if ! timeout $ACTIVE_TIMEOUT sh -c "while ! ssh -o StrictHostKeyChecking=no -i $KEY_FILE ${DEFAULT_INSTANCE_USER}@$FLOATING_IP echo success ; do sleep 1; done"; then
-        echo "server didn't become ssh-able!"
-        exit 1
+        die $LINENO "server didn't become ssh-able!"
     fi
 }
 
diff --git a/lib/ceilometer b/lib/ceilometer
index e890ff9..8772867 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -64,13 +64,7 @@
     [ ! -d $CEILOMETER_API_LOG_DIR ] &&  sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR
     sudo chown $USER $CEILOMETER_API_LOG_DIR
 
-    if is_service_enabled rabbit ; then
-        iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_kombu'
-        iniset $CEILOMETER_CONF DEFAULT rabbit_host $RABBIT_HOST
-        iniset $CEILOMETER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
-    elif is_service_enabled qpid ; then
-        iniset $CEILOMETER_CONF DEFAULT rpc_backend 'ceilometer.openstack.common.rpc.impl_qpid'
-    fi
+    iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
 
     iniset $CEILOMETER_CONF DEFAULT notification_topics 'notifications,glance_notifications'
     iniset $CEILOMETER_CONF DEFAULT verbose True
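
iniset_rpc_backend (see lib/rpc_backend below) now writes the transport
settings that were previously set inline here. For a rabbit deployment
the resulting ceilometer.conf would contain roughly (a sketch, values
taken from the usual RABBIT_* variables):

    [DEFAULT]
    rpc_backend = ceilometer.openstack.common.rpc.impl_kombu
    rabbit_host = $RABBIT_HOST
    rabbit_password = $RABBIT_PASSWORD
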
diff --git a/lib/database b/lib/database
index 4fba7c2..ebab333 100644
--- a/lib/database
+++ b/lib/database
@@ -29,20 +29,6 @@
 # Sourcing the database libs sets DATABASE_BACKENDS with the available list
 for f in $TOP_DIR/lib/databases/*; do source $f; done
 
-# If ``DATABASE_TYPE`` is defined here it's because the user has it in ``localrc``
-# or has called ``use_database``.  Both are deprecated so let's fix it up for now.
-if [[ -n $DATABASE_TYPE ]]; then
-    # This is now deprecated usage, set up a warning and try to be
-    # somewhat backward compatible for now.
-    DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; DATABASE_TYPE or use_database is deprecated localrc\n"
-    if [[ ! $ENABLED_SERVICES =~ $DATABASE_TYPE ]]; then
-        # It's not in enabled services but user has attempted to select a
-        # database, so just add it now
-        ENABLED_SERVICES+=,$DATABASE_TYPE
-        unset DATABASE_TYPE
-    fi
-fi
-
 # ``DATABASE_BACKENDS`` now contains a list of the supported databases
 # Look in ``ENABLED_SERVICES`` to see if one has been selected
 for db in $DATABASE_BACKENDS; do
diff --git a/lib/glance b/lib/glance
index 80d3902..a6b698f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -114,9 +114,8 @@
         iniset $GLANCE_API_CONF DEFAULT notifier_strategy qpid
     elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
         iniset $GLANCE_API_CONF DEFAULT notifier_strategy rabbit
-        iniset $GLANCE_API_CONF DEFAULT rabbit_host $RABBIT_HOST
-        iniset $GLANCE_API_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
     fi
+    iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
     iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
 
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
@@ -187,8 +186,7 @@
     screen_it g-api "cd $GLANCE_DIR; $GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$GLANCE_HOSTPORT; do sleep 1; done"; then
-      echo "g-api did not start"
-      exit 1
+      die $LINENO "g-api did not start"
     fi
 }
 
diff --git a/lib/keystone b/lib/keystone
index a1a57f8..2580351 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -323,8 +323,7 @@
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s http://$SERVICE_HOST:$service_port/v2.0/ >/dev/null; do sleep 1; done"; then
-      echo "keystone did not start"
-      exit 1
+      die $LINENO "keystone did not start"
     fi
 
     # Start proxies if enabled
diff --git a/lib/nova b/lib/nova
index 849ec57..3749790 100644
--- a/lib/nova
+++ b/lib/nova
@@ -542,8 +542,7 @@
     screen_it n-api "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api"
     echo "Waiting for nova-api to start..."
     if ! wait_for_service $SERVICE_TIMEOUT http://$SERVICE_HOST:$service_port; then
-      echo "nova-api did not start"
-      exit 1
+      die $LINENO "nova-api did not start"
     fi
 
     # Start proxies if enabled
diff --git a/lib/quantum b/lib/quantum
index 3466162..3e41d8d 100644
--- a/lib/quantum
+++ b/lib/quantum
@@ -351,8 +351,7 @@
     screen_it q-svc "cd $QUANTUM_DIR && python $QUANTUM_DIR/bin/quantum-server --config-file $QUANTUM_CONF --config-file /$Q_PLUGIN_CONF_FILE"
     echo "Waiting for Quantum to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$Q_HOST:$Q_PORT; do sleep 1; done"; then
-      echo "Quantum did not start"
-      exit 1
+      die $LINENO "Quantum did not start"
     fi
 }
 
@@ -396,8 +395,7 @@
     quantum_plugin_configure_common
 
     if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
-        echo "Quantum plugin not set.. exiting"
-        exit 1
+        die $LINENO "Quantum plugin not set.. exiting"
     fi
 
     # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``QUANTUM_CONF_DIR``
@@ -508,8 +506,7 @@
     if is_service_enabled $DATABASE_BACKENDS; then
         recreate_database $Q_DB_NAME utf8
     else
-        echo "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
-        exit 1
+        die $LINENO "A database must be enabled in order to use the $Q_PLUGIN Quantum plugin."
     fi
 
     # Update either configuration file with plugin
@@ -659,11 +656,10 @@
     fi
     if ! timeout $timeout_sec sh -c "$check_command"; then
         if [[ "$expected" = "True" ]]; then
-            echo "[Fail] Couldn't ping server"
+            die $LINENO "[Fail] Couldn't ping server"
         else
-            echo "[Fail] Could ping server"
+            die $LINENO "[Fail] Could ping server"
         fi
-        exit 1
     fi
 }
 
@@ -677,8 +673,8 @@
-    local probe_cmd = ""
+    local probe_cmd=""
     probe_cmd=`_get_probe_cmd_prefix $from_net`
     if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success ; do sleep 1; done"; then
-        echo "server didn't become ssh-able!"
-        exit 1
+        die $LINENO "server didn't become ssh-able!"
     fi
 }
 
diff --git a/lib/quantum_plugins/linuxbridge b/lib/quantum_plugins/linuxbridge
index 6d5d4e0..0756de4 100644
--- a/lib/quantum_plugins/linuxbridge
+++ b/lib/quantum_plugins/linuxbridge
@@ -30,11 +30,12 @@
 }
 
 function quantum_plugin_configure_dhcp_agent() {
-    :
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager quantum.agent.dhcp_agent.DhcpAgentWithStateReport
 }
 
 function quantum_plugin_configure_l3_agent() {
     iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function quantum_plugin_configure_plugin_agent() {
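
As the class names suggest, the *WithStateReport agent managers make the
DHCP and L3 agents report their state back to the quantum server instead
of running silently. The generated dhcp_agent.ini would then carry
roughly (a sketch):

    [DEFAULT]
    dhcp_agent_manager = quantum.agent.dhcp_agent.DhcpAgentWithStateReport
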
diff --git a/lib/quantum_plugins/nicira b/lib/quantum_plugins/nicira
index bc9a36f..8c150b1 100644
--- a/lib/quantum_plugins/nicira
+++ b/lib/quantum_plugins/nicira
@@ -19,8 +19,7 @@
         conn=(${NVP_CONTROLLER_CONNECTION//\:/ })
         OVS_MGR_IP=${conn[0]}
     else
-        echo "Error - No controller specified. Unable to set a manager for OVS"
-        exit 1
+        die $LINENO "Error - No controller specified. Unable to set a manager for OVS"
     fi
     sudo ovs-vsctl set-manager ssl:$OVS_MGR_IP
 }
@@ -63,14 +62,12 @@
 
 function quantum_plugin_configure_l3_agent() {
    # Nicira plugin does not run L3 agent
-   echo "ERROR - q-l3 should must not be executed with Nicira plugin!"
-   exit 1
+   die $LINENO "q-l3 should must not be executed with Nicira plugin!"
 }
 
 function quantum_plugin_configure_plugin_agent() {
    # Nicira plugin does not run L2 agent
-   echo "ERROR - q-agt must not be executed with Nicira plugin!"
-   exit 1
+   die $LINENO "q-agt must not be executed with Nicira plugin!"
 }
 
 function quantum_plugin_configure_service() {
@@ -93,8 +90,7 @@
         if [[ "$DEFAULT_TZ_UUID" != "" ]]; then
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_tz_uuid $DEFAULT_TZ_UUID
         else
-            echo "ERROR - The nicira plugin won't work without a default transport zone."
-            exit 1
+            die $LINENO "The nicira plugin won't work without a default transport zone."
         fi
         if [[ "$DEFAULT_L3_GW_SVC_UUID" != "" ]]; then
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
@@ -114,8 +110,7 @@
             # Only 1 controller can be specified in this case
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_controller_connection $NVP_CONTROLLER_CONNECTION
         else
-            echo "ERROR - The nicira plugin needs at least an NVP controller."
-            exit 1
+            die $LINENO "The nicira plugin needs at least an NVP controller."
         fi
         if [[ "$NVP_USER" != "" ]]; then
             iniset /$Q_PLUGIN_CONF_FILE "CLUSTER:$DC" nvp_user $NVP_USER
diff --git a/lib/quantum_plugins/openvswitch b/lib/quantum_plugins/openvswitch
index 726c6c3..a57336e 100644
--- a/lib/quantum_plugins/openvswitch
+++ b/lib/quantum_plugins/openvswitch
@@ -36,6 +36,7 @@
 
 function quantum_plugin_configure_l3_agent() {
     _quantum_ovs_base_configure_l3_agent
+    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager quantum.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function quantum_plugin_configure_plugin_agent() {
@@ -49,9 +50,7 @@
         # REVISIT - also check kernel module support for GRE and patch ports
         OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
         if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
-            echo "You are running OVS version $OVS_VERSION."
-            echo "OVS 1.4+ is required for tunneling between multiple hosts."
-            exit 1
+            die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts."
         fi
         iniset /$Q_PLUGIN_CONF_FILE OVS enable_tunneling True
         iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
diff --git a/lib/quantum_plugins/plumgrid b/lib/quantum_plugins/plumgrid
new file mode 100644
index 0000000..b49aa92
--- /dev/null
+++ b/lib/quantum_plugins/plumgrid
@@ -0,0 +1,37 @@
+# PLUMgrid Quantum Plugin
+# Edgar Magana emagana@plumgrid.com
+# ------------------------------------
+
+# Save trace settings
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+#source $TOP_DIR/lib/quantum_plugins/ovs_base
+
+function quantum_plugin_create_nova_conf() {
+
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+}
+
+function quantum_plugin_setup_interface_driver() {
+    :
+}
+
+function quantum_plugin_configure_common() {
+    Q_PLUGIN_CONF_PATH=etc/quantum/plugins/plumgrid
+    Q_PLUGIN_CONF_FILENAME=plumgrid.ini
+    Q_DB_NAME="plumgrid_quantum"
+    Q_PLUGIN_CLASS="quantum.plugins.plumgrid.plumgrid_nos_plugin.plumgrid_plugin.QuantumPluginPLUMgridV2"
+}
+
+function quantum_plugin_configure_service() {
+    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server localhost
+    iniset /$Q_PLUGIN_CONF_FILE PLUMgridNOS nos_server_port 7766
+}
+
+function quantum_plugin_configure_debug_command() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
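
To exercise the new plugin, localrc would select it through the standard
Q_PLUGIN dispatch in lib/quantum (a sketch; service names follow the
usual quantum setup):

    # localrc
    disable_service n-net
    enable_service q-svc quantum
    Q_PLUGIN=plumgrid
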
diff --git a/lib/quantum_plugins/ryu b/lib/quantum_plugins/ryu
index 2dfd4f7..d1d7382 100644
--- a/lib/quantum_plugins/ryu
+++ b/lib/quantum_plugins/ryu
@@ -17,7 +17,9 @@
     _quantum_ovs_base_install_agent_packages
 
     # quantum_ryu_agent requires ryu module
+    install_package $(get_packages "ryu")
     install_ryu
+    configure_ryu
 }
 
 function quantum_plugin_configure_common() {
diff --git a/lib/quantum_thirdparty/ryu b/lib/quantum_thirdparty/ryu
index 7a01923..f1e9e7c 100644
--- a/lib/quantum_thirdparty/ryu
+++ b/lib/quantum_thirdparty/ryu
@@ -17,24 +17,15 @@
 RYU_OFP_PORT=${RYU_OFP_PORT:-6633}
 # Ryu Applications
 RYU_APPS=${RYU_APPS:-ryu.app.simple_isolation,ryu.app.rest}
-# Ryu configuration
-RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"
---app_lists=$RYU_APPS
---wsapi_host=$RYU_API_HOST
---wsapi_port=$RYU_API_PORT
---ofp_listen_host=$RYU_OFP_HOST
---ofp_tcp_listen_port=$RYU_OFP_PORT
---quantum_url=http://$Q_HOST:$Q_PORT
---quantum_admin_username=$Q_ADMIN_USERNAME
---quantum_admin_password=$SERVICE_PASSWORD
---quantum_admin_tenant_name=$SERVICE_TENANT_NAME
---quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
---quantum_auth_strategy=$Q_AUTH_STRATEGY
---quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
-"}
 
+# configure_ryu can be called multiple times as quantum_plugins/ryu may call
+# this function for the quantum-ryu-agent
+_RYU_CONFIGURED=${_RYU_CONFIGURED:-False}
 function configure_ryu() {
-    setup_develop $RYU_DIR
+    if [[ "$_RYU_CONFIGURED" == "False" ]]; then
+        setup_develop $RYU_DIR
+        _RYU_CONFIGURED=True
+    fi
 }
 
 function init_ryu() {
@@ -46,6 +37,21 @@
     RYU_CONF=$RYU_CONF_DIR/ryu.conf
     sudo rm -rf $RYU_CONF
 
+    # Ryu configuration
+    RYU_CONF_CONTENTS=${RYU_CONF_CONTENTS:-"[DEFAULT]
+app_lists=$RYU_APPS
+wsapi_host=$RYU_API_HOST
+wsapi_port=$RYU_API_PORT
+ofp_listen_host=$RYU_OFP_HOST
+ofp_tcp_listen_port=$RYU_OFP_PORT
+quantum_url=http://$Q_HOST:$Q_PORT
+quantum_admin_username=$Q_ADMIN_USERNAME
+quantum_admin_password=$SERVICE_PASSWORD
+quantum_admin_tenant_name=$SERVICE_TENANT_NAME
+quantum_admin_auth_url=$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0
+quantum_auth_strategy=$Q_AUTH_STRATEGY
+quantum_controller_addr=tcp:$RYU_OFP_HOST:$RYU_OFP_PORT
+"}
     echo "${RYU_CONF_CONTENTS}" > $RYU_CONF
 }
 
@@ -62,7 +68,7 @@
 }
 
 function start_ryu() {
-    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --flagfile $RYU_CONF"
+    screen_it ryu "cd $RYU_DIR && $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF"
 }
 
 function stop_ryu() {
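
ryu-manager is now launched with --config-file rather than the
gflags-style --flagfile, so the generated configuration switches from
--key=value flags to ini syntax with a [DEFAULT] section (abridged from
RYU_CONF_CONTENTS above):

    [DEFAULT]
    app_lists=ryu.app.simple_isolation,ryu.app.rest
    wsapi_host=$RYU_API_HOST
    # started as: $RYU_DIR/bin/ryu-manager --config-file $RYU_CONF
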
diff --git a/lib/rpc_backend b/lib/rpc_backend
index f35f9db..02614ea 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -39,8 +39,7 @@
     fi
 
     if is_service_enabled qpid && ! qpid_is_supported; then
-        echo "Qpid support is not available for this version of your distribution."
-        exit 1
+        die $LINENO "Qpid support is not available for this version of your distribution."
     fi
 }
 
@@ -58,6 +57,8 @@
             install_package qpid-cpp-server-daemon
         elif is_ubuntu; then
             install_package qpidd
+            sudo sed -i '/PLAIN/!s/mech_list: /mech_list: PLAIN /' /etc/sasl2/qpidd.conf
+            sudo chmod o+r /etc/qpid/qpidd.sasldb
         else
             exit_distro_not_supported "qpid installation"
         fi
@@ -100,6 +101,11 @@
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_zmq
     elif is_service_enabled qpid; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_qpid
+        if is_ubuntu; then
+            QPID_PASSWORD=`sudo strings /etc/qpid/qpidd.sasldb | grep -B1 admin | head -1`
+            iniset $file $section qpid_password $QPID_PASSWORD
+            iniset $file $section qpid_username admin
+        fi
     elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then
         iniset $file $section rpc_backend ${package}.openstack.common.rpc.impl_kombu
         iniset $file $section rabbit_host $RABBIT_HOST
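
On Ubuntu the qpid broker now accepts SASL PLAIN and each service is
handed explicit credentials; for nova the injected settings would look
roughly like this (a sketch; the password is recovered from
qpidd.sasldb as above):

    [DEFAULT]
    rpc_backend = nova.openstack.common.rpc.impl_qpid
    qpid_username = admin
    qpid_password = <value read from /etc/qpid/qpidd.sasldb>
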
diff --git a/lib/tempest b/lib/tempest
index d17b32d..9cc19ae 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -238,6 +238,9 @@
     iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
 
     # network
+    if is_service_enabled quantum; then
+        iniset $TEMPEST_CONF network quantum_available "True"
+    fi
     iniset $TEMPEST_CONF network api_version 2.0
     iniset $TEMPEST_CONF network tenant_networks_reachable "$tenant_networks_reachable"
     iniset $TEMPEST_CONF network public_network_id "$public_network_id"
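
With quantum enabled, the generated tempest.conf network section would
then read roughly (a sketch of the iniset results above):

    [network]
    quantum_available = True
    api_version = 2.0
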
diff --git a/stack.sh b/stack.sh
index 86b835a..6952fba 100755
--- a/stack.sh
+++ b/stack.sh
@@ -55,8 +55,7 @@
 # allow you to safely override those settings.
 
 if [[ ! -r $TOP_DIR/stackrc ]]; then
-    echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
-    exit 1
+    log_error $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
 fi
 source $TOP_DIR/stackrc
 
@@ -93,8 +92,7 @@
 if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|f16|f17|f18|opensuse-12.2) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
-        echo "If you wish to run this script anyway run with FORCE=yes"
-        exit 1
+        die $LINENO "If you wish to run this script anyway run with FORCE=yes"
     fi
 fi
 
@@ -105,16 +103,14 @@
 # ``stack.sh`` keeps function libraries here
 # Make sure ``$TOP_DIR/lib`` directory is present
 if [ ! -d $TOP_DIR/lib ]; then
-    echo "ERROR: missing devstack/lib"
-    exit 1
+    log_error $LINENO "missing devstack/lib"
 fi
 
 # ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
 # templates and other useful files in the ``files`` subdirectory
 FILES=$TOP_DIR/files
 if [ ! -d $FILES ]; then
-    echo "ERROR: missing devstack/files"
-    exit 1
+    log_error $LINENO "missing devstack/files"
 fi
 
 SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -246,9 +242,7 @@
         fi
     done
     if [ "$HOST_IP" == "" ]; then
-        echo "Could not determine host ip address."
-        echo "Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
-        exit 1
+        die $LINENO "Could not determine host ip address. Either localrc specified dhcp on ${HOST_IP_IFACE} or defaulted"
     fi
 fi