Refactor error logging
It is hard to grep for errors in the current log, so this patch
updates the die function to also write its message to
screen_log_dir/error.log.
In the future, we may categorize failures by using this
error.log.
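
As a rough illustration of the helper behaviour the exercises below rely
on, here is a minimal sketch only, not the actual functions change;
SCREEN_LOGDIR, the timestamp, and the exact message format are assumptions:

    # Sketch (assumed): die takes the caller's line number first, echoes
    # the error, appends it to error.log under the screen log directory,
    # and exits non-zero.
    function die {
        local line=$1; shift
        local msg="[ERROR] ${BASH_SOURCE[1]}:${line} $*"
        echo "$msg" >&2
        if [[ -n "$SCREEN_LOGDIR" ]]; then    # assumed variable name
            echo "$(date +'%F %T') $msg" >> "$SCREEN_LOGDIR/error.log"
        fi
        exit 1
    }

    # die_if_not_set follows the same calling convention:
    # line number, then the variable name to test, then the message.
    function die_if_not_set {
        local line=$1; shift
        local var=$1; shift
        [[ -n "${!var}" ]] || die "$line" "$*"
    }
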
Change-Id: I70a8cfe67ed408284f5c88c762c6bb8acb8ecdb2
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index a92c0d9..3c83725 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -56,7 +56,7 @@
if [ $(nova aggregate-list | grep -c " $aggregate_name ") == 0 ]; then
echo "SUCCESS $aggregate_name not present"
else
- echo "ERROR found aggregate: $aggregate_name"
+ die $LINENO "found aggregate: $aggregate_name"
exit -1
fi
}
@@ -67,15 +67,14 @@
AGGREGATE2_ID=$(nova aggregate-create $AGGREGATE2_NAME $AGGREGATE_A_ZONE | grep " $AGGREGATE2_NAME " | get_field 1)
# check aggregate created
-nova aggregate-list | grep -q " $AGGREGATE_NAME " || die "Aggregate $AGGREGATE_NAME not created"
+nova aggregate-list | grep -q " $AGGREGATE_NAME " || die $LINENO "Aggregate $AGGREGATE_NAME not created"
# Ensure creating a duplicate fails
# =================================
if nova aggregate-create $AGGREGATE_NAME $AGGREGATE_A_ZONE; then
- echo "ERROR could create duplicate aggregate"
- exit -1
+ die $LINENO "could create duplicate aggregate"
fi
@@ -113,7 +112,7 @@
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_1_KEY
nova aggregate-details $AGGREGATE_ID | grep $META_DATA_3_KEY
-nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die "ERROR metadata was not cleared"
+nova aggregate-details $AGGREGATE_ID | grep $META_DATA_2_KEY && die $LINENO "metadata was not cleared"
nova aggregate-set-metadata $AGGREGATE_ID $META_DATA_3_KEY $META_DATA_1_KEY
nova aggregate-details $AGGREGATE_ID | egrep "{u'availability_zone': u'$AGGREGATE_A_ZONE'}|{}"
@@ -129,8 +128,7 @@
nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
- echo "ERROR could add duplicate host to single aggregate"
- exit -1
+ die $LINENO "could add duplicate host to single aggregate"
fi
nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 679091b..14d0049 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -72,7 +72,7 @@
# Grab the id of the image to launch
IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
# Security Groups
# ---------------
@@ -140,7 +140,7 @@
# Create the bootable volume
start_time=$(date +%s)
cinder create --image-id $IMAGE --display_name=$VOL_NAME --display_description "test bootable volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
- die "Failure creating volume $VOL_NAME"
+ die $LINENO "Failure creating volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
echo "Volume $VOL_NAME not created"
exit 1
@@ -150,7 +150,7 @@
# Get volume ID
VOL_ID=$(cinder list | grep $VOL_NAME | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
# Boot instance
# -------------
@@ -159,7 +159,7 @@
# <dev_name>=<id>:<type>:<size(GB)>:<delete_on_terminate>
# Leaving the middle two fields blank appears to do-the-right-thing
VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --block-device-mapping vda=$VOL_ID --security_groups=$SECGROUP --key_name $KEY_NAME $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
@@ -169,7 +169,7 @@
# Get the instance IP
IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
# Private IPs can be pinged in single node deployments
ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -178,7 +178,7 @@
# --------
# Delete volume backed instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
echo "Server $VM_NAME not deleted"
exit 1
@@ -192,7 +192,7 @@
# Delete volume
start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOLUME_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
echo "Volume $VOL_NAME not deleted"
exit 1
@@ -201,7 +201,7 @@
echo "Completed cinder delete in $((end_time - start_time)) seconds"
# Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index 12f2732..dce36aa 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -49,21 +49,20 @@
BUCKET=testbucket
IMAGE=bundle.img
truncate -s 5M /tmp/$IMAGE
-euca-bundle-image -i /tmp/$IMAGE || die "Failure bundling image $IMAGE"
+euca-bundle-image -i /tmp/$IMAGE || die $LINENO "Failure bundling image $IMAGE"
-euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die "Failure uploading bundle $IMAGE to $BUCKET"
+euca-upload-bundle --debug -b $BUCKET -m /tmp/$IMAGE.manifest.xml || die $LINENO "Failure uploading bundle $IMAGE to $BUCKET"
AMI=`euca-register $BUCKET/$IMAGE.manifest.xml | cut -f2`
-die_if_not_set AMI "Failure registering $BUCKET/$IMAGE"
+die_if_not_set $LINENO AMI "Failure registering $BUCKET/$IMAGE"
# Wait for the image to become available
if ! timeout $REGISTER_TIMEOUT sh -c "while euca-describe-images | grep $AMI | grep -q available; do sleep 1; done"; then
- echo "Image $AMI not available within $REGISTER_TIMEOUT seconds"
- exit 1
+ die $LINENO "Image $AMI not available within $REGISTER_TIMEOUT seconds"
fi
# Clean up
-euca-deregister $AMI || die "Failure deregistering $AMI"
+euca-deregister $AMI || die $LINENO "Failure deregistering $AMI"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 8b15da8..50d4744 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -56,68 +56,62 @@
# Find a machine image to boot
IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
# Add a secgroup
if ! euca-describe-groups | grep -q $SECGROUP; then
euca-add-group -d "$SECGROUP description" $SECGROUP
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
- echo "Security group not created"
- exit 1
+ die $LINENO "Security group not created"
fi
fi
# Launch it
INSTANCE=`euca-run-instances -g $SECGROUP -t $DEFAULT_INSTANCE_TYPE $IMAGE | grep INSTANCE | cut -f2`
-die_if_not_set INSTANCE "Failure launching instance"
+die_if_not_set $LINENO INSTANCE "Failure launching instance"
# Assure it has booted within a reasonable time
if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
- echo "server didn't become active within $RUNNING_TIMEOUT seconds"
- exit 1
+ die $LINENO "server didn't become active within $RUNNING_TIMEOUT seconds"
fi
# Volumes
# -------
if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
- die_if_not_set VOLUME_ZONE "Failure to find zone for volume"
+ die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
VOLUME=`euca-create-volume -s 1 -z $VOLUME_ZONE | cut -f2`
- die_if_not_set VOLUME "Failure to create volume"
+ die_if_not_set $LINENO VOLUME "Failure to create volume"
# Test that volume has been created
VOLUME=`euca-describe-volumes | cut -f2`
- die_if_not_set VOLUME "Failure to get volume"
+ die_if_not_set $LINENO VOLUME "Failure to get volume"
# Test volume has become available
if ! timeout $RUNNING_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
- echo "volume didnt become available within $RUNNING_TIMEOUT seconds"
- exit 1
+ die $LINENO "volume didn't become available within $RUNNING_TIMEOUT seconds"
fi
# Attach volume to an instance
euca-attach-volume -i $INSTANCE -d $ATTACH_DEVICE $VOLUME || \
- die "Failure attaching volume $VOLUME to $INSTANCE"
+ die $LINENO "Failure attaching volume $VOLUME to $INSTANCE"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q in-use; do sleep 1; done"; then
- echo "Could not attach $VOLUME to $INSTANCE"
- exit 1
+ die $LINENO "Could not attach $VOLUME to $INSTANCE"
fi
# Detach volume from an instance
euca-detach-volume $VOLUME || \
- die "Failure detaching volume $VOLUME to $INSTANCE"
+ die $LINENO "Failure detaching volume $VOLUME from $INSTANCE"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! euca-describe-volumes $VOLUME | grep -q available; do sleep 1; done"; then
- echo "Could not detach $VOLUME to $INSTANCE"
- exit 1
+ die $LINENO "Could not detach $VOLUME from $INSTANCE"
fi
# Remove volume
euca-delete-volume $VOLUME || \
- die "Failure to delete volume"
+ die $LINENO "Failure to delete volume"
if ! timeout $ACTIVE_TIMEOUT sh -c "while euca-describe-volumes | grep $VOLUME; do sleep 1; done"; then
- echo "Could not delete $VOLUME"
- exit 1
+ die $LINENO "Could not delete $VOLUME"
fi
else
echo "Volume Tests Skipped"
@@ -125,58 +119,55 @@
# Allocate floating address
FLOATING_IP=`euca-allocate-address | cut -f2`
-die_if_not_set FLOATING_IP "Failure allocating floating IP"
+die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
# Associate floating address
euca-associate-address -i $INSTANCE $FLOATING_IP || \
- die "Failure associating address $FLOATING_IP to $INSTANCE"
+ die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
# Authorize pinging
euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
- die "Failure authorizing rule in $SECGROUP"
+ die $LINENO "Failure authorizing rule in $SECGROUP"
# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
# Revoke pinging
euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
- die "Failure revoking rule in $SECGROUP"
+ die $LINENO "Failure revoking rule in $SECGROUP"
# Release floating address
euca-disassociate-address $FLOATING_IP || \
- die "Failure disassociating address $FLOATING_IP"
+ die $LINENO "Failure disassociating address $FLOATING_IP"
# Wait just a tick for everything above to complete so release doesn't fail
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
- echo "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
- exit 1
+ die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
fi
# Release floating address
euca-release-address $FLOATING_IP || \
- die "Failure releasing address $FLOATING_IP"
+ die $LINENO "Failure releasing address $FLOATING_IP"
# Wait just a tick for everything above to complete so terminate doesn't fail
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
- echo "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
- exit 1
+ die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
fi
# Terminate instance
euca-terminate-instances $INSTANCE || \
- die "Failure terminating instance $INSTANCE"
+ die $LINENO "Failure terminating instance $INSTANCE"
# Assure it has terminated within a reasonable time. The behaviour of this
# case changed with bug/836978. Requesting the status of an invalid instance
# will now return an error message including the instance id, so we need to
# filter that out.
if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -ve \"\\\(InstanceNotFound\\\|InvalidInstanceID\[.\]NotFound\\\)\" | grep -q $INSTANCE; do sleep 1; done"; then
- echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
- exit 1
+ die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
fi
# Delete secgroup
-euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
+euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 34ab69d..b4e1c42 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -71,7 +71,7 @@
# Grab the id of the image to launch
IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
# Security Groups
# ---------------
@@ -83,8 +83,7 @@
if ! nova secgroup-list | grep -q $SECGROUP; then
nova secgroup-create $SECGROUP "$SECGROUP description"
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
- echo "Security group not created"
- exit 1
+ die $LINENO "Security group not created"
fi
fi
@@ -115,7 +114,7 @@
# Clean-up from previous runs
nova delete $VM_NAME || true
if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
- echo "server didn't terminate!"
+ die $LINENO "server didn't terminate!"
exit 1
fi
@@ -123,17 +122,16 @@
# -------------
VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server didn't become active!"
- exit 1
+ die $LINENO "server didn't become active!"
fi
# Get the instance IP
IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
# Private IPs can be pinged in single node deployments
ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -143,17 +141,16 @@
# Allocate a floating IP from the default pool
FLOATING_IP=$(nova floating-ip-create | grep $DEFAULT_FLOATING_POOL | get_field 1)
-die_if_not_set FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
+die_if_not_set $LINENO FLOATING_IP "Failure creating floating IP from pool $DEFAULT_FLOATING_POOL"
# List floating addresses
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep -q $FLOATING_IP; do sleep 1; done"; then
- echo "Floating IP not allocated"
- exit 1
+ die $LINENO "Floating IP not allocated"
fi
# Add floating IP to our server
nova add-floating-ip $VM_UUID $FLOATING_IP || \
- die "Failure adding floating IP $FLOATING_IP to $VM_NAME"
+ die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME"
# Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds
ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
@@ -161,18 +158,17 @@
if ! is_service_enabled quantum; then
# Allocate an IP from second floating pool
TEST_FLOATING_IP=$(nova floating-ip-create $TEST_FLOATING_POOL | grep $TEST_FLOATING_POOL | get_field 1)
- die_if_not_set TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
+ die_if_not_set $LINENO TEST_FLOATING_IP "Failure creating floating IP in $TEST_FLOATING_POOL"
# list floating addresses
if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova floating-ip-list | grep $TEST_FLOATING_POOL | grep -q $TEST_FLOATING_IP; do sleep 1; done"; then
- echo "Floating IP not allocated"
- exit 1
+ die $LINENO "Floating IP not allocated"
fi
fi
# Dis-allow icmp traffic (ping)
nova secgroup-delete-rule $SECGROUP icmp -1 -1 0.0.0.0/0 || \
- die "Failure deleting security group rule from $SECGROUP"
+ die $LINENO "Failure deleting security group rule from $SECGROUP"
# FIXME (anthony): make xs support security groups
if [ "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then
@@ -186,24 +182,23 @@
if ! is_service_enabled quantum; then
# Delete second floating IP
nova floating-ip-delete $TEST_FLOATING_IP || \
- die "Failure deleting floating IP $TEST_FLOATING_IP"
+ die $LINENO "Failure deleting floating IP $TEST_FLOATING_IP"
fi
# Delete the floating ip
nova floating-ip-delete $FLOATING_IP || \
- die "Failure deleting floating IP $FLOATING_IP"
+ die $LINENO "Failure deleting floating IP $FLOATING_IP"
# Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
# Wait for termination
if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
- echo "Server $VM_NAME not deleted"
- exit 1
+ die $LINENO "Server $VM_NAME not deleted"
fi
# Delete secgroup
nova secgroup-delete $SECGROUP || \
- die "Failure deleting security group $SECGROUP"
+ die $LINENO "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/horizon.sh b/exercises/horizon.sh
index c5dae3a..5d778c9 100755
--- a/exercises/horizon.sh
+++ b/exercises/horizon.sh
@@ -36,7 +36,7 @@
is_service_enabled horizon || exit 55
# can we get the front page
-curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die "Horizon front page not functioning!"
+curl http://$SERVICE_HOST 2>/dev/null | grep -q '<h3>Log In</h3>' || die $LINENO "Horizon front page not functioning!"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index bc33fe8..5c4b16e 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -264,7 +264,7 @@
--image $(get_image_id) \
$NIC \
$TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
- die_if_not_set VM_UUID "Failure launching $TENANT-server$NUM" VM_UUID
+ die_if_not_set $LINENO VM_UUID "Failure launching $TENANT-server$NUM"
confirm_server_active $VM_UUID
}
@@ -309,8 +309,7 @@
function shutdown_vms {
foreach_tenant_vm 'shutdown_vm ${%TENANT%_NAME} %NUM%'
if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q ACTIVE; do sleep 1; done"; then
- echo "Some VMs failed to shutdown"
- false
+ die $LINENO "Some VMs failed to shutdown"
fi
}
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index a33c9c6..b73afdf 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -68,7 +68,7 @@
# Delete secgroup
nova secgroup-delete $SEC_GROUP_NAME || \
- die "Failure deleting security group $SEC_GROUP_NAME"
+ die $LINENO "Failure deleting security group $SEC_GROUP_NAME"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index a75f955..46ac2c5 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -45,20 +45,20 @@
# =============
# Check if we have to swift via keystone
-swift stat || die "Failure geting status"
+swift stat || die $LINENO "Failure getting status"
# We start by creating a test container
-swift post $CONTAINER || die "Failure creating container $CONTAINER"
+swift post $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
# add some files into it.
-swift upload $CONTAINER /etc/issue || die "Failure uploading file to container $CONTAINER"
+swift upload $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER"
# list them
-swift list $CONTAINER || die "Failure listing contents of container $CONTAINER"
+swift list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
# And we may want to delete them now that we have tested that
# everything works.
-swift delete $CONTAINER || die "Failure deleting container $CONTAINER"
+swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
set +o xtrace
echo "*********************************************************************"
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 45cb0c8..7913641 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -70,7 +70,7 @@
# Grab the id of the image to launch
IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
-die_if_not_set IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
+die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
# Security Groups
# ---------------
@@ -114,25 +114,23 @@
# Clean-up from previous runs
nova delete $VM_NAME || true
if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
- echo "server didn't terminate!"
- exit 1
+ die $LINENO "server didn't terminate!"
fi
# Boot instance
# -------------
VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE --security_groups=$SECGROUP $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set VM_UUID "Failure launching $VM_NAME"
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
# Check that the status is active within ACTIVE_TIMEOUT seconds
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
- echo "server didn't become active!"
- exit 1
+ die $LINENO "server didn't become active!"
fi
# Get the instance IP
IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set IP "Failure retrieving IP address"
+die_if_not_set $LINENO IP "Failure retrieving IP address"
# Private IPs can be pinged in single node deployments
ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
@@ -142,42 +140,38 @@
# Verify it doesn't exist
if [[ -n $(cinder list | grep $VOL_NAME | head -1 | get_field 2) ]]; then
- echo "Volume $VOL_NAME already exists"
- exit 1
+ die $LINENO "Volume $VOL_NAME already exists"
fi
# Create a new volume
start_time=$(date +%s)
cinder create --display_name $VOL_NAME --display_description "test volume: $VOL_NAME" $DEFAULT_VOLUME_SIZE || \
- die "Failure creating volume $VOL_NAME"
+ die $LINENO "Failure creating volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
- echo "Volume $VOL_NAME not created"
- exit 1
+ die $LINENO "Volume $VOL_NAME not created"
fi
end_time=$(date +%s)
echo "Completed cinder create in $((end_time - start_time)) seconds"
# Get volume ID
VOL_ID=$(cinder list | grep $VOL_NAME | head -1 | get_field 1)
-die_if_not_set VOL_ID "Failure retrieving volume ID for $VOL_NAME"
+die_if_not_set $LINENO VOL_ID "Failure retrieving volume ID for $VOL_NAME"
# Attach to server
DEVICE=/dev/vdb
start_time=$(date +%s)
nova volume-attach $VM_UUID $VOL_ID $DEVICE || \
- die "Failure attaching volume $VOL_NAME to $VM_NAME"
+ die $LINENO "Failure attaching volume $VOL_NAME to $VM_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep in-use; do sleep 1; done"; then
- echo "Volume $VOL_NAME not attached to $VM_NAME"
- exit 1
+ die $LINENO "Volume $VOL_NAME not attached to $VM_NAME"
fi
end_time=$(date +%s)
echo "Completed volume-attach in $((end_time - start_time)) seconds"
VOL_ATTACH=$(cinder list | grep $VOL_NAME | head -1 | get_field -1)
-die_if_not_set VOL_ATTACH "Failure retrieving $VOL_NAME status"
+die_if_not_set $LINENO VOL_ATTACH "Failure retrieving $VOL_NAME status"
if [[ "$VOL_ATTACH" != $VM_UUID ]]; then
- echo "Volume not attached to correct instance"
- exit 1
+ die $LINENO "Volume not attached to correct instance"
fi
# Clean up
@@ -185,33 +179,30 @@
# Detach volume
start_time=$(date +%s)
-nova volume-detach $VM_UUID $VOL_ID || die "Failure detaching volume $VOL_NAME from $VM_NAME"
+nova volume-detach $VM_UUID $VOL_ID || die $LINENO "Failure detaching volume $VOL_NAME from $VM_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while ! cinder list | grep $VOL_NAME | grep available; do sleep 1; done"; then
- echo "Volume $VOL_NAME not detached from $VM_NAME"
- exit 1
+ die $LINENO "Volume $VOL_NAME not detached from $VM_NAME"
fi
end_time=$(date +%s)
echo "Completed volume-detach in $((end_time - start_time)) seconds"
# Delete volume
start_time=$(date +%s)
-cinder delete $VOL_ID || die "Failure deleting volume $VOL_NAME"
+cinder delete $VOL_ID || die $LINENO "Failure deleting volume $VOL_NAME"
if ! timeout $ACTIVE_TIMEOUT sh -c "while cinder list | grep $VOL_NAME; do sleep 1; done"; then
- echo "Volume $VOL_NAME not deleted"
- exit 1
+ die $LINENO "Volume $VOL_NAME not deleted"
fi
end_time=$(date +%s)
echo "Completed cinder delete in $((end_time - start_time)) seconds"
# Delete instance
-nova delete $VM_UUID || die "Failure deleting instance $VM_NAME"
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
- echo "Server $VM_NAME not deleted"
- exit 1
+ die $LINENO "Server $VM_NAME not deleted"
fi
# Delete secgroup
-nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
set +o xtrace
echo "*********************************************************************"