Merge "Updates for tools/info.sh"
diff --git a/.gitignore b/.gitignore
index c49b4a3..1840352 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,8 +7,10 @@
 localrc
 local.sh
 files/*.gz
+files/*.qcow2
 files/images
 files/pip-*
+files/get-pip.py
 stack-screenrc
 *.pem
 accrc
@@ -17,3 +19,5 @@
 devstack-docs-*
 docs/
 docs-files
+.localrc.auto
+local.conf
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
index 18bef8b..edcc6d4 100755
--- a/driver_certs/cinder_driver_cert.sh
+++ b/driver_certs/cinder_driver_cert.sh
@@ -2,6 +2,22 @@
 
 # **cinder_cert.sh**
 
+# This script is a simple wrapper around the tempest volume api tests.
+# It requires that you have a working and functional devstack install
+# and that you've enabled your device driver by making the necessary
+# modifications to /etc/cinder/cinder.conf.
+
+# This script will refresh your openstack repos and restart the cinder
+# services to pick up your driver changes.
+# Please NOTE: this script assumes your devstack install is functional
+# and includes tempest. A good first step is to make sure you can
+# create volumes on your device before you even try to run this script.
+
+# It also assumes the default install location (/opt/stack/xxx).
+# To aid in debugging, you should also verify that you've set
+# an output directory for screen logs:
+#     SCREEN_LOGDIR=/opt/stack/screen-logs
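+#
+# On success the script will report the output log file ($TEMPFILE) that
+# you should submit along with your certification results.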
+
 CERT_DIR=$(cd $(dirname "$0") && pwd)
 TOP_DIR=$(cd $CERT_DIR/..; pwd)
 
@@ -73,9 +89,9 @@
 sleep 5
 
 # run tempest api/volume/test_*
-log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True
+log_message "Run the actual tempest volume tests (./tools/pretty_tox.sh api.volume)...", True
 exec 2> >(tee -a $TEMPFILE)
-`./run_tests.sh -N tempest.api.volume.test_*`
+`./tools/pretty_tox.sh api.volume`
 if [[ $? = 0 ]]; then
     log_message "CONGRATULATIONS!!!  Device driver PASSED!", True
     log_message "Submit output: ($TEMPFILE)"
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 1b1ac06..d223301 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -43,6 +43,10 @@
 # Test as the admin user
 . $TOP_DIR/openrc admin admin
 
+# If the nova api is not enabled, we exit with exit code 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
 # Cells does not support aggregates.
 is_service_enabled n-cell && exit 55
 
diff --git a/exercises/bundle.sh b/exercises/bundle.sh
index b83678a..5470960 100755
--- a/exercises/bundle.sh
+++ b/exercises/bundle.sh
@@ -39,6 +39,10 @@
 rm -f $TOP_DIR/cert.pem
 rm -f $TOP_DIR/pk.pem
 
+# If the nova api is not enabled, we exit with exit code 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
 # Get Certificates
 nova x509-get-root-cert $TOP_DIR/cacert.pem
 nova x509-create-cert $TOP_DIR/pk.pem $TOP_DIR/cert.pem
diff --git a/exercises/euca.sh b/exercises/euca.sh
index ed521e4..51b2644 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -41,6 +41,10 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# If the nova api is not enabled, we exit with exit code 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
 # Skip if the hypervisor is Docker
 [[ "$VIRT_DRIVER" == "docker" ]] && exit 55
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 7055278..4ca90a5 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -38,6 +38,10 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# If the nova api is not enabled, we exit with exit code 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
 # Skip if the hypervisor is Docker
 [[ "$VIRT_DRIVER" == "docker" ]] && exit 55
 
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 0c0d42f..1343f11 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -185,6 +185,14 @@
     fi
 }
 
+function neutron_debug_admin {
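+    # Run the given neutron-debug command as the admin user, then restore
+    # the credentials that were in use before the call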
+    local os_username=$OS_USERNAME
+    local os_tenant_id=$OS_TENANT_ID
+    source $TOP_DIR/openrc admin admin
+    neutron-debug $@
+    source $TOP_DIR/openrc $os_username $os_tenant_id
+}
+
 function add_tenant {
     local TENANT=$1
     local USER=$2
@@ -241,7 +249,7 @@
     local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
     neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
-    neutron-debug probe-create --device-owner compute $NET_ID
+    neutron_debug_admin probe-create --device-owner compute $NET_ID
     source $TOP_DIR/openrc demo demo
 }
 
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index eb32cc7..d71a1e0 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -33,6 +33,10 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# If the nova api is not enabled, we exit with exit code 55 so that
+# the exercise is skipped
+is_service_enabled n-api || exit 55
+
 # Skip if the hypervisor is Docker
 [[ "$VIRT_DRIVER" == "docker" ]] && exit 55
 
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index 277904a..e64f68f 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -47,7 +47,17 @@
 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292
 catalog.RegionOne.image.name = Image Service
 
-catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.publicURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.adminURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.cloudformation.name = Heat CloudFormation Service
+
+catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
+catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8004/v1/$(tenant_id)s
 catalog.RegionOne.orchestration.name = Heat Service
+
+catalog.RegionOne.metering.publicURL = http://%SERVICE_HOST%:8777/v1
+catalog.RegionOne.metering.adminURL = http://%SERVICE_HOST%:8777/v1
+catalog.RegionOne.metering.internalURL = http://%SERVICE_HOST%:8777/v1
+catalog.RegionOne.metering.name = Telemetry Service
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index ea2d52d..d477c42 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -6,7 +6,6 @@
 # ------------------------------------------------------------------
 # service              glance     admin
 # service              heat       service        # if enabled
-# service              ceilometer admin          # if enabled
 # Tempest Only:
 # alt_demo             alt_demo  Member
 #
@@ -28,16 +27,6 @@
 export SERVICE_ENDPOINT=$SERVICE_ENDPOINT
 SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service}
 
-function get_id () {
-    echo `"$@" | awk '/ id / { print $4 }'`
-}
-
-# Lookups
-SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
-ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
-MEMBER_ROLE=$(keystone role-list | awk "/ Member / { print \$2 }")
-
-
 # Roles
 # -----
 
@@ -45,53 +34,52 @@
 # The admin role in swift allows a user to act as an admin for their tenant,
 # but ResellerAdmin is needed for a user to act as any tenant. The name of this
 # role is also configurable in swift-proxy.conf
-RESELLER_ROLE=$(get_id keystone role-create --name=ResellerAdmin)
+keystone role-create --name=ResellerAdmin
 # Service role, so service users do not have to be admins
-SERVICE_ROLE=$(get_id keystone role-create --name=service)
+keystone role-create --name=service
 
 
 # Services
 # --------
 
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
-    NOVA_USER=$(keystone user-list | awk "/ nova / { print \$2 }")
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api.
     keystone user-role-add \
-        --tenant-id $SERVICE_TENANT \
-        --user-id $NOVA_USER \
-        --role-id $RESELLER_ROLE
+        --tenant $SERVICE_TENANT_NAME \
+        --user nova \
+        --role ResellerAdmin
 fi
 
 # Heat
 if [[ "$ENABLED_SERVICES" =~ "heat" ]]; then
-    HEAT_USER=$(get_id keystone user-create --name=heat \
+    keystone user-create --name=heat \
         --pass="$SERVICE_PASSWORD" \
-        --tenant_id $SERVICE_TENANT \
-        --email=heat@example.com)
-    keystone user-role-add --tenant-id $SERVICE_TENANT \
-        --user-id $HEAT_USER \
-        --role-id $SERVICE_ROLE
+        --tenant $SERVICE_TENANT_NAME \
+        --email=heat@example.com
+    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
+        --user heat \
+        --role service
     # heat_stack_user role is for users created by Heat
     keystone role-create --name heat_stack_user
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        HEAT_CFN_SERVICE=$(get_id keystone service-create \
+        keystone service-create \
             --name=heat-cfn \
             --type=cloudformation \
-            --description="Heat CloudFormation Service")
+            --description="Heat CloudFormation Service"
         keystone endpoint-create \
             --region RegionOne \
-            --service_id $HEAT_CFN_SERVICE \
+            --service heat-cfn \
             --publicurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
             --adminurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1" \
             --internalurl "http://$SERVICE_HOST:$HEAT_API_CFN_PORT/v1"
-        HEAT_SERVICE=$(get_id keystone service-create \
+        keystone service-create \
             --name=heat \
             --type=orchestration \
-            --description="Heat Service")
+            --description="Heat Service"
         keystone endpoint-create \
             --region RegionOne \
-            --service_id $HEAT_SERVICE \
+            --service heat \
             --publicurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
             --adminurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \
             --internalurl "http://$SERVICE_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s"
@@ -100,23 +88,23 @@
 
 # Glance
 if [[ "$ENABLED_SERVICES" =~ "g-api" ]]; then
-    GLANCE_USER=$(get_id keystone user-create \
+    keystone user-create \
         --name=glance \
         --pass="$SERVICE_PASSWORD" \
-        --tenant_id $SERVICE_TENANT \
-        --email=glance@example.com)
+        --tenant $SERVICE_TENANT_NAME \
+        --email=glance@example.com
     keystone user-role-add \
-        --tenant-id $SERVICE_TENANT \
-        --user-id $GLANCE_USER \
-        --role-id $ADMIN_ROLE
+        --tenant $SERVICE_TENANT_NAME \
+        --user glance \
+        --role admin
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        GLANCE_SERVICE=$(get_id keystone service-create \
+        keystone service-create \
             --name=glance \
             --type=image \
-            --description="Glance Image Service")
+            --description="Glance Image Service"
         keystone endpoint-create \
             --region RegionOne \
-            --service_id $GLANCE_SERVICE \
+            --service glance \
             --publicurl "http://$SERVICE_HOST:9292" \
             --adminurl "http://$SERVICE_HOST:9292" \
             --internalurl "http://$SERVICE_HOST:9292"
@@ -124,42 +112,23 @@
 fi
 
 # Ceilometer
-if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]]; then
-    CEILOMETER_USER=$(get_id keystone user-create --name=ceilometer \
-        --pass="$SERVICE_PASSWORD" \
-        --tenant_id $SERVICE_TENANT \
-        --email=ceilometer@example.com)
-    keystone user-role-add --tenant-id $SERVICE_TENANT \
-        --user-id $CEILOMETER_USER \
-        --role-id $ADMIN_ROLE
+if [[ "$ENABLED_SERVICES" =~ "ceilometer" ]] && [[ "$ENABLED_SERVICES" =~ "s-proxy" || "$ENABLED_SERVICES" =~ "swift" ]]; then
     # Ceilometer needs ResellerAdmin role to access swift account stats.
-    keystone user-role-add --tenant-id $SERVICE_TENANT \
-        --user-id $CEILOMETER_USER \
-        --role-id $RESELLER_ROLE
-    if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        CEILOMETER_SERVICE=$(get_id keystone service-create \
-            --name=ceilometer \
-            --type=metering \
-            --description="Ceilometer Service")
-        keystone endpoint-create \
-            --region RegionOne \
-            --service_id $CEILOMETER_SERVICE \
-            --publicurl "http://$SERVICE_HOST:8777" \
-            --adminurl "http://$SERVICE_HOST:8777" \
-            --internalurl "http://$SERVICE_HOST:8777"
-    fi
+    keystone user-role-add --tenant $SERVICE_TENANT_NAME \
+        --user ceilometer \
+        --role ResellerAdmin
 fi
 
 # EC2
 if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        EC2_SERVICE=$(get_id keystone service-create \
+        keystone service-create \
             --name=ec2 \
             --type=ec2 \
-            --description="EC2 Compatibility Layer")
+            --description="EC2 Compatibility Layer"
         keystone endpoint-create \
             --region RegionOne \
-            --service_id $EC2_SERVICE \
+            --service ec2 \
             --publicurl "http://$SERVICE_HOST:8773/services/Cloud" \
             --adminurl "http://$SERVICE_HOST:8773/services/Admin" \
             --internalurl "http://$SERVICE_HOST:8773/services/Cloud"
@@ -169,13 +138,13 @@
 # S3
 if [[ "$ENABLED_SERVICES" =~ "n-obj" || "$ENABLED_SERVICES" =~ "swift3" ]]; then
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-        S3_SERVICE=$(get_id keystone service-create \
+        keystone service-create \
             --name=s3 \
             --type=s3 \
-            --description="S3")
+            --description="S3"
         keystone endpoint-create \
             --region RegionOne \
-            --service_id $S3_SERVICE \
+            --service s3 \
             --publicurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
             --adminurl "http://$SERVICE_HOST:$S3_SERVICE_PORT" \
             --internalurl "http://$SERVICE_HOST:$S3_SERVICE_PORT"
@@ -185,14 +154,14 @@
 if [[ "$ENABLED_SERVICES" =~ "tempest" ]]; then
     # Tempest has some tests that validate various authorization checks
     # between two regular users in separate tenants
-    ALT_DEMO_TENANT=$(get_id keystone tenant-create \
-        --name=alt_demo)
-    ALT_DEMO_USER=$(get_id keystone user-create \
+    keystone tenant-create \
+        --name=alt_demo
+    keystone user-create \
         --name=alt_demo \
         --pass="$ADMIN_PASSWORD" \
-        --email=alt_demo@example.com)
+        --email=alt_demo@example.com
     keystone user-role-add \
-        --tenant-id $ALT_DEMO_TENANT \
-        --user-id $ALT_DEMO_USER \
-        --role-id $MEMBER_ROLE
+        --tenant alt_demo \
+        --user alt_demo \
+        --role Member
 fi
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index d7b7ea8..c91bac3 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,3 +1,4 @@
 selinux-policy-targeted
 mongodb-server
 pymongo
+mongodb # NOPRIME
diff --git a/functions b/functions
index 92b61ed..73d65ce 100644
--- a/functions
+++ b/functions
@@ -1150,6 +1150,9 @@
 
 
 # Stop a service in screen
+# If a PID is available, use it to kill the whole process group via TERM.
+# If screen is being used, also kill the screen window; this catches
+# processes that did not leave a PID behind
 # screen_stop service
 function screen_stop() {
     SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -1159,7 +1162,7 @@
     if is_service_enabled $1; then
         # Kill via pid if we have one available
         if [[ -r $SERVICE_DIR/$SCREEN_NAME/$1.pid ]]; then
-            pkill -TERM -P $(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
+            pkill -TERM -P -$(cat $SERVICE_DIR/$SCREEN_NAME/$1.pid)
             rm $SERVICE_DIR/$SCREEN_NAME/$1.pid
         fi
         if [[ "$USE_SCREEN" = "True" ]]; then
@@ -1301,7 +1304,8 @@
     echo "cd $REQUIREMENTS_DIR; $SUDO_CMD python update.py $project_dir"
 
     # Don't update repo if local changes exist
-    (cd $project_dir && git diff --quiet)
+    # Don't use buggy "git diff --quiet"
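+    # ("git diff --quiet" can skip the index refresh and may therefore flag
+    # stat-only changes; "--exit-code" with its output discarded avoids this)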
+    (cd $project_dir && git diff --exit-code >/dev/null)
     local update_requirements=$?
 
     if [ $update_requirements -eq 0 ]; then
@@ -1539,7 +1543,7 @@
         # NOTE: For backwards compatibility reasons, colons may be used in place
         # of semi-colons for property delimiters but they are not permitted
         # characters in NTFS filesystems.
-        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'`
+        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).*[:;].*[:;].*$'`
         IFS=':;' read -a props <<< "$property_string"
         vmdk_disktype="${props[0]:-$vmdk_disktype}"
         vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
diff --git a/lib/ceilometer b/lib/ceilometer
index 211303f..6f3896f 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -48,8 +48,50 @@
 # Set up database backend
 CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}
 
+# Ceilometer connection info.
+CEILOMETER_SERVICE_PROTOCOL=http
+CEILOMETER_SERVICE_HOST=$SERVICE_HOST
+CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777}
+#
+
 # Functions
 # ---------
+#
+# create_ceilometer_accounts() - Set up common required ceilometer accounts
+
+create_ceilometer_accounts() {
+
+    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+    # Ceilometer
+    if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then
+        CEILOMETER_USER=$(keystone user-create \
+            --name=ceilometer \
+            --pass="$SERVICE_PASSWORD" \
+            --tenant_id $SERVICE_TENANT \
+            --email=ceilometer@example.com \
+            | grep " id " | get_field 2)
+        keystone user-role-add \
+            --tenant-id $SERVICE_TENANT \
+            --user-id $CEILOMETER_USER \
+            --role-id $ADMIN_ROLE
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            CEILOMETER_SERVICE=$(keystone service-create \
+                --name=ceilometer \
+                --type=metering \
+                --description="OpenStack Telemetry Service" \
+                | grep " id " | get_field 2)
+            keystone endpoint-create \
+                --region RegionOne \
+                --service_id $CEILOMETER_SERVICE \
+                --publicurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
+                --adminurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT" \
+                --internalurl "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT"
+        fi
+    fi
+}
+
 
 # cleanup_ceilometer() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
@@ -109,6 +151,8 @@
 
 function configure_mongodb() {
     if is_fedora; then
+        # install mongodb client
+        install_package mongodb
         # ensure smallfiles selected to minimize freespace requirements
         sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod
 
diff --git a/lib/cinder b/lib/cinder
index 111b974..5397308 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -390,7 +390,7 @@
                 --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
             CINDER_V2_SERVICE=$(keystone service-create \
-                --name=cinder \
+                --name=cinderv2 \
                 --type=volumev2 \
                 --description="Cinder Volume Service V2" \
                 | grep " id " | get_field 2)
diff --git a/lib/glance b/lib/glance
index 21c1fa5..55d5fb3 100644
--- a/lib/glance
+++ b/lib/glance
@@ -125,7 +125,7 @@
         iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD
         iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
 
-        iniset_multiline DEFAULT known_stores glance.store.filesystem.Store glance.store.http.Store glance.store.swift.Store
+        iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store"
     fi
 
     cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
diff --git a/lib/heat b/lib/heat
index e35305b..0307c64 100644
--- a/lib/heat
+++ b/lib/heat
@@ -110,15 +110,12 @@
     [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone
 
     # OpenStack API
-    iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST
     iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT
 
     # Cloudformation API
-    iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST
     iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT
 
     # Cloudwatch API
-    iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST
     iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
 
     # heat environment
diff --git a/lib/ironic b/lib/ironic
index 1ff3c81..afbc3e0 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -33,7 +33,6 @@
 IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
 IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf
 IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
-IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d
 IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
 
 # Support entry points installation of console scripts
@@ -118,7 +117,7 @@
 # Sets conductor specific settings.
 function configure_ironic_conductor() {
     cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
-    cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS
+    cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
 
     iniset $IRONIC_CONF DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
 }
diff --git a/lib/keystone b/lib/keystone
index a7e5d66..0850fb2 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -28,7 +28,6 @@
 XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
-
 # Defaults
 # --------
 
@@ -246,14 +245,14 @@
     fi
 
     # Set up logging
-    LOGGING_ROOT="devel"
     if [ "$SYSLOG" != "False" ]; then
-        LOGGING_ROOT="$LOGGING_ROOT,production"
+        iniset $KEYSTONE_CONF DEFAULT use_syslog "True"
     fi
-    KEYSTONE_LOG_CONFIG="--log-config $KEYSTONE_CONF_DIR/logging.conf"
-    cp $KEYSTONE_DIR/etc/logging.conf.sample $KEYSTONE_CONF_DIR/logging.conf
-    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root level "DEBUG"
-    iniset $KEYSTONE_CONF_DIR/logging.conf logger_root handlers "devel,production"
+
+    # Format logging
+    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+        setup_colorized_logging $KEYSTONE_CONF DEFAULT
+    fi
 
     if is_apache_enabled_service key; then
         _config_keystone_apache_wsgi
@@ -411,7 +410,7 @@
         screen_it key "cd $KEYSTONE_DIR && sudo tail -f /var/log/$APACHE_NAME/keystone"
     else
         # Start Keystone in a screen window
-        screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG --debug"
+        screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF --debug"
     fi
 
     echo "Waiting for keystone to start..."
diff --git a/lib/neutron b/lib/neutron
index 43f43f9..960f11b 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -1,5 +1,5 @@
 # lib/neutron
-# functions - funstions specific to neutron
+# functions - functions specific to neutron
 
 # Dependencies:
 # ``functions`` file
@@ -505,8 +505,7 @@
         [ ! -z "$pid" ] && sudo kill -9 $pid
     fi
     if is_service_enabled q-meta; then
-        pid=$(ps aux | awk '/neutron-ns-metadata-proxy/ { print $2 }')
-        [ ! -z "$pid" ] && sudo kill -9 $pid
+        sudo pkill -9 neutron-ns-metadata-proxy || :
     fi
 
     if is_service_enabled q-lbaas; then
@@ -611,9 +610,6 @@
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
-    iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT root_helper "$Q_RR_COMMAND"
-    # Intermediate fix until Neutron patch lands and then line above will
-    # be cleaned.
     iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND"
 
     _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE
@@ -958,6 +954,11 @@
     _neutron_third_party_do stop
 }
 
+# check_neutron_third_party_integration() - Check that third party integration is sane
+function check_neutron_third_party_integration() {
+    _neutron_third_party_do check
+}
+
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index f9275ca..8e18d04 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -38,7 +38,7 @@
 }
 
 function neutron_plugin_configure_plugin_agent() {
-    AGENT_BINARY="$NEUTON_BIN_DIR/neutron-linuxbridge-agent"
+    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent"
 }
 
 function neutron_plugin_setup_interface_driver() {
diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md
index b289f58..2460e5c 100644
--- a/lib/neutron_thirdparty/README.md
+++ b/lib/neutron_thirdparty/README.md
@@ -34,3 +34,6 @@
 
 * ``stop_<third_party>``:
   stop running processes (non-screen)
+
+* ``check_<third_party>``:
+  verify that the integration between neutron server and third-party components is sane
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
index ebde067..1fd4fd8 100644
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ b/lib/neutron_thirdparty/bigswitch_floodlight
@@ -45,5 +45,9 @@
     :
 }
 
+function check_bigswitch_floodlight() {
+    :
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index 7928bca..e672528 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -56,5 +56,9 @@
     :
 }
 
+function check_midonet() {
+    :
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/neutron_thirdparty/ryu b/lib/neutron_thirdparty/ryu
index 3b825a1..5edf273 100644
--- a/lib/neutron_thirdparty/ryu
+++ b/lib/neutron_thirdparty/ryu
@@ -75,5 +75,9 @@
     :
 }
 
+function check_ryu() {
+    :
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index bdc2356..2b12564 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -109,5 +109,9 @@
     sudo TREMA_TMP=$TREMA_TMP_DIR trema killall
 }
 
+function check_trema() {
+    :
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 70d3482..4eb177a 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -78,5 +78,9 @@
     done
 }
 
+function check_vmware_nsx() {
+    neutron-check-nsx-config $NEUTRON_CONF_DIR/plugins/vmware/nsx.ini
+}
+
 # Restore xtrace
 $MY_XTRACE
diff --git a/lib/nova b/lib/nova
index 4eb56b8..a4edb53 100644
--- a/lib/nova
+++ b/lib/nova
@@ -338,7 +338,7 @@
                 --adminurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \
                 --internalurl "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s"
             NOVA_V3_SERVICE=$(keystone service-create \
-                --name=nova \
+                --name=novav3 \
                 --type=computev3 \
                 --description="Nova Compute Service V3" \
                 | grep " id " | get_field 2)
@@ -649,6 +649,13 @@
     fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+        # Enable client side traces for libvirt
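+        # (libvirt log levels: 1=debug, 2=info, 3=warning, 4=error)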
+        local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:event 3:json 3:file 1:util"
+        local log_outputs="1:file:/var/log/libvirt/libvirtd.log"
+        # Enable server side traces for libvirtd
+        echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+        echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf
+
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
         screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index ef40e7a..6f90f4a 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -93,9 +93,6 @@
         fi
     fi
 
-    # Change the libvirtd log level to DEBUG.
-    sudo sed -i s/"#log_level = 3"/"log_level = 1"/ /etc/libvirt/libvirtd.conf
-
     # The user that nova runs as needs to be member of **libvirtd** group otherwise
     # nova-compute will be unable to use libvirt.
     if ! getent group $LIBVIRT_GROUP >/dev/null; then
diff --git a/lib/savanna b/lib/savanna
index bb4dfe6..c7d59f7 100644
--- a/lib/savanna
+++ b/lib/savanna
@@ -95,9 +95,7 @@
     iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $SERVICE_TENANT_NAME
     iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
 
-    recreate_database savanna utf8
-    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna`
-    inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection `database_connection_url savanna`
 
     if is_service_enabled neutron; then
         iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true
@@ -105,6 +103,9 @@
     fi
 
     iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
+
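+    # Create the database and run the schema migrations now that the
+    # connection URL is configured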
+    recreate_database savanna utf8
+    $SAVANNA_BIN_DIR/savanna-db-manage --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE upgrade head
 }
 
 # install_savanna() - Collect source and prepare
diff --git a/lib/tempest b/lib/tempest
index 08c0553..ef9dfe2 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -329,7 +329,7 @@
     iniset $TEMPEST_CONF network-feature-enabled api_extensions "${NETWORK_API_EXTENSIONS:-all}"
 
     # service_available
-    for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove; do
+    for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna trove marconi; do
         if is_service_enabled $service ; then
             iniset $TEMPEST_CONF service_available $service "True"
         else
diff --git a/samples/localrc b/samples/local.conf
similarity index 86%
rename from samples/localrc
rename to samples/local.conf
index 80cf0e7..c8126c2 100644
--- a/samples/localrc
+++ b/samples/local.conf
@@ -1,19 +1,22 @@
-# Sample ``localrc`` for user-configurable variables in ``stack.sh``
+# Sample ``local.conf`` for user-configurable variables in ``stack.sh``
 
 # NOTE: Copy this file to the root ``devstack`` directory for it to
 # work properly.
 
-# ``localrc`` is a user-maintained setings file that is sourced from ``stackrc``.
+# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``.
 # This gives it the ability to override any variables set in ``stackrc``.
 # Also, most of the settings in ``stack.sh`` are written to only be set if no
-# value has already been set; this lets ``localrc`` effectively override the
+# value has already been set; this lets ``local.conf`` effectively override the
 # default values.
 
 # This is a collection of some of the settings we have found to be useful
 # in our DevStack development environments. Additional settings are described
-# in http://devstack.org/localrc.html
+# in http://devstack.org/local.conf.html
 # These should be considered as samples and are unsupported DevStack code.
 
+# The ``localrc`` section replaces the old ``localrc`` configuration file.
+# Note that if ``localrc`` is present it will be used in favor of this section.
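+# Other meta-sections, for example ``[[post-config|$NOVA_CONF]]``, can be
+# added after this section to inject settings directly into service
+# configuration files.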
+[[local|localrc]]
 
 # Minimal Contents
 # ----------------
@@ -22,7 +25,7 @@
 # there are a few minimal variables set:
 
 # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
-# values for them by ``stack.sh`` and they will be added to ``localrc``.
+# values for them by ``stack.sh`` and they will be added to ``local.conf``.
 ADMIN_PASSWORD=nomoresecrete
 MYSQL_PASSWORD=stackdb
 RABBIT_PASSWORD=stackqueue
diff --git a/samples/local.sh b/samples/local.sh
index 970cbb9..664cb66 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -23,45 +23,47 @@
 # Destination path for installation ``DEST``
 DEST=${DEST:-/opt/stack}
 
+if is_service_enabled nova; then
 
-# Import ssh keys
-# ---------------
+    # Import ssh keys
+    # ---------------
 
-# Import keys from the current user into the default OpenStack user (usually
-# ``demo``)
+    # Import keys from the current user into the default OpenStack user (usually
+    # ``demo``)
 
-# Get OpenStack auth
-source $TOP_DIR/openrc
+    # Get OpenStack user auth
+    source $TOP_DIR/openrc
 
-# Add first keypair found in localhost:$HOME/.ssh
-for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
-    if [[ -r $i ]]; then
-        nova keypair-add --pub_key=$i `hostname`
-        break
+    # Add first keypair found in localhost:$HOME/.ssh
+    for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
+        if [[ -r $i ]]; then
+            nova keypair-add --pub_key=$i `hostname`
+            break
+        fi
+    done
+
+
+    # Create A Flavor
+    # ---------------
+
+    # Get OpenStack admin auth
+    source $TOP_DIR/openrc admin admin
+
+    # Name of new flavor
+    # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
+    MI_NAME=m1.micro
+
+    # Create micro flavor if not present
+    if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
+        nova flavor-create $MI_NAME 6 128 0 1
     fi
-done
 
 
-# Create A Flavor
-# ---------------
+    # Other Uses
+    # ----------
 
-# Get OpenStack admin auth
-source $TOP_DIR/openrc admin admin
+    # Add tcp/22 and icmp to default security group
+    nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
+    nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
 
-# Name of new flavor
-# set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro``
-MI_NAME=m1.micro
-
-# Create micro flavor if not present
-if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
-    nova flavor-create $MI_NAME 6 128 0 1
 fi
-
-
-# Other Uses
-# ----------
-
-# Add tcp/22 and icmp to default security group
-nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
-nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
-
diff --git a/stack.sh b/stack.sh
index c303dc3..a2469f1 100755
--- a/stack.sh
+++ b/stack.sh
@@ -23,6 +23,13 @@
 # Make sure custom grep options don't get in the way
 unset GREP_OPTIONS
 
+# Sanitize language settings to avoid commands bailing out
+# with "unsupported locale setting" errors.
+unset LANG
+unset LANGUAGE
+LC_ALL=C
+export LC_ALL
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
@@ -291,6 +298,9 @@
 SYSSTAT_FILE=${SYSSTAT_FILE:-"sysstat.dat"}
 SYSSTAT_INTERVAL=${SYSSTAT_INTERVAL:-"1"}
 
+PIDSTAT_FILE=${PIDSTAT_FILE:-"pidstat.txt"}
+PIDSTAT_INTERVAL=${PIDSTAT_INTERVAL:-"5"}
+
 # Use color for logging output (only available if syslog is not used)
 LOG_COLOR=`trueorfalse True $LOG_COLOR`
 
@@ -305,9 +315,13 @@
 # Configure Projects
 # ==================
 
-# Source project function libraries
+# Import apache functions
 source $TOP_DIR/lib/apache
+
+# Import TLS functions
 source $TOP_DIR/lib/tls
+
+# Source project function libraries
 source $TOP_DIR/lib/infra
 source $TOP_DIR/lib/oslo
 source $TOP_DIR/lib/stackforge
@@ -860,11 +874,27 @@
 # -------
 
 # If enabled, systat has to start early to track OpenStack service startup.
-if is_service_enabled sysstat;then
+if is_service_enabled sysstat; then
+    # What we want to measure:
+    # -u : CPU statistics
+    # -q : run queue length and load averages
+    # -b : I/O and transfer rates
+    # -w : process creation and context switch rates
+    SYSSTAT_OPTS="-u -q -b -w"
     if [[ -n ${SCREEN_LOGDIR} ]]; then
-        screen_it sysstat "cd ; sar -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
+        screen_it sysstat "cd $TOP_DIR; ./tools/sar_filter.py $SYSSTAT_OPTS -o $SCREEN_LOGDIR/$SYSSTAT_FILE $SYSSTAT_INTERVAL"
     else
-        screen_it sysstat "sar $SYSSTAT_INTERVAL"
+        screen_it sysstat "./tools/sar_filter.py $SYSSTAT_OPTS $SYSSTAT_INTERVAL"
+    fi
+fi
+
+if is_service_enabled pidstat; then
+    # Per-process stats
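+    # -l : show the full command line
+    # -p ALL : sample every process
+    # -T ALL : report both per-task and child statistics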
+    PIDSTAT_OPTS="-l -p ALL -T ALL"
+    if [[ -n ${SCREEN_LOGDIR} ]]; then
+        screen_it pidstat "cd $TOP_DIR; pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL > $SCREEN_LOGDIR/$PIDSTAT_FILE"
+    else
+        screen_it pidstat "pidstat $PIDSTAT_OPTS $PIDSTAT_INTERVAL"
     fi
 fi
 
@@ -901,6 +931,10 @@
         create_trove_accounts
     fi
 
+    if is_service_enabled ceilometer; then
+        create_ceilometer_accounts
+    fi
+
     if is_service_enabled swift || is_service_enabled s-proxy; then
         create_swift_accounts
     fi
@@ -1098,6 +1132,15 @@
     iniset $NOVA_CONF DEFAULT s3_affix_tenant "True"
 fi
 
+# Create a randomized default value for the keymgr's fixed_key
+if is_service_enabled nova; then
+    FIXED_KEY=""
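+    # Build a 64 hex digit (256 bit) key one random nibble at a time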
+    for i in $(seq 1 64); do
+        FIXED_KEY+=$(echo "obase=16; $(($RANDOM % 16))" | bc)
+    done
+    iniset $NOVA_CONF keymgr fixed_key "$FIXED_KEY"
+fi
+
 if is_service_enabled zeromq; then
     echo_summary "Starting zermomq receiver"
     screen_it zeromq "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-rpc-zmq-receiver"
@@ -1112,6 +1155,7 @@
 if is_service_enabled q-svc; then
     echo_summary "Starting Neutron"
     start_neutron_service_and_check
+    check_neutron_third_party_integration
 elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
     NM_CONF=${NOVA_CONF}
     if is_service_enabled n-cell; then
diff --git a/stackrc b/stackrc
index 49fb26b..8a0280e 100644
--- a/stackrc
+++ b/stackrc
@@ -284,6 +284,9 @@
     vsphere)
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
         IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
+    xenserver)
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk}
+        IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"};;
     *) # Default to Cirros with kernel, ramdisk and disk image
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
diff --git a/tools/sar_filter.py b/tools/sar_filter.py
new file mode 100755
index 0000000..ed8c196
--- /dev/null
+++ b/tools/sar_filter.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 Samsung Electronics Corp. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import re
+import subprocess
+import sys
+
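+# Wrapper around "sar": the per-metric header lines that sar emits are
+# combined and printed only once, and all samples that share a timestamp
+# are merged onto a single output line.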
+
+def is_data_line(line):
+    timestamp, data = parse_line(line)
+    return re.search('\d\.\d', data)
+
+
+def parse_line(line):
+    m = re.search('(\d\d:\d\d:\d\d \w\w)(\s+((\S+)\s*)+)', line)
+    if m:
+        date = m.group(1)
+        data = m.group(2).rstrip()
+        return date, data
+    else:
+        return None, None
+
+
+process = subprocess.Popen(
+    "sar %s" % " ".join(sys.argv[1:]),
+    shell=True,
+    stdout=subprocess.PIPE,
+    stderr=subprocess.STDOUT)
+
+# Poll process for new output until finished
+
+start_time = ""
+header = ""
+data_line = ""
+printed_header = False
+current_ts = None
+while True:
+    nextline = process.stdout.readline()
+    if nextline == '' and process.poll() is not None:
+        break
+
+    date, data = parse_line(nextline)
+    # stop until we get to the first set of real lines
+    if not date:
+        continue
+
+    # now we eat the header lines, and only print out the header
+    # if we've never seen them before
+    if not start_time:
+        start_time = date
+        header += "%s   %s" % (date, data)
+    elif date == start_time:
+        header += "   %s" % data
+    elif not printed_header:
+        printed_header = True
+        print header
+
+    # now we know these are data lines; print the accumulated samples when
+    # the timestamp changes, and keep stacking them up otherwise.
+    nextline = process.stdout.readline()
+    date, data = parse_line(nextline)
+    if date != current_ts:
+        current_ts = date
+        print data_line
+        data_line = "%s   %s" % (date, data)
+    else:
+        data_line += "   %s" % data
+
+    sys.stdout.flush()
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 06192ed..ee1abcc 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -70,6 +70,9 @@
 Of course, use real passwords if this machine is exposed.
 
     cat > ./localrc <<EOF
+    # At the moment, we depend on github's snapshot function.
+    GIT_BASE="http://github.com"
+
     # Passwords
     # NOTE: these need to be specified, otherwise devstack will try
     # to prompt for these passwords, blocking the install process.
diff --git a/tools/xen/functions b/tools/xen/functions
index 563303d..97c56bc 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -1,5 +1,14 @@
 #!/bin/bash
 
+function die_with_error {
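+    # Print the given message on stderr and abort the script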
+    local err_msg
+
+    err_msg="$1"
+
+    echo "$err_msg" >&2
+    exit 1
+}
+
 function xapi_plugin_location {
     for PLUGIN_DIR in "/etc/xapi.d/plugins/" "/usr/lib/xcp/plugins/" "/usr/lib/xapi/plugins"; do
         if [ -d $PLUGIN_DIR ]; then
@@ -11,7 +20,7 @@
 }
 
 function zip_snapshot_location {
-    echo $1 | sed "s:\.git$::;s:$:/zipball/$2:g"
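+    # Build a zipball URL from a repo URL and a ref, rewriting git:// to
+    # http://, e.g. "git://github.com/openstack/nova.git" "master" becomes
+    # "http://github.com/openstack/nova/zipball/master"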
+    echo $1 | sed "s,^git://,http://,g;s:\.git$::;s:$:/zipball/$2:g"
 }
 
 function create_directory_for_kernels {
@@ -41,7 +50,9 @@
     local EXTRACTED_FILES=$(mktemp -d)
 
     {
-        wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate
+        if ! wget -nv $ZIPBALL_URL -O $LOCAL_ZIPBALL --no-check-certificate; then
+            die_with_error "Failed to download [$ZIPBALL_URL]"
+        fi
         unzip -q -o $LOCAL_ZIPBALL -d $EXTRACTED_FILES
         rm -f $LOCAL_ZIPBALL
     } >&2
diff --git a/tools/xen/mocks b/tools/xen/mocks
index 94b0ca4..3b9b05c 100644
--- a/tools/xen/mocks
+++ b/tools/xen/mocks
@@ -35,7 +35,7 @@
 
 function wget {
     if [[ $@ =~ "failurl" ]]; then
-        exit 1
+        return 1
     fi
     echo "wget $@" >> $LIST_OF_ACTIONS
 }
@@ -73,10 +73,14 @@
         done
         return 1
     fi
-    echo "Mock test does not implement the requested function"
+    echo "Mock test does not implement the requested function: ${1:-}"
     exit 1
 }
 
+function die_with_error {
+    echo "$1" >> $DEAD_MESSAGES
+}
+
 function xe {
     cat $XE_RESPONSE
     {
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 0ae2cb7..373d996 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -29,6 +29,9 @@
 
     XE_CALLS=$(mktemp)
     truncate -s 0 $XE_CALLS
+
+    DEAD_MESSAGES=$(mktemp)
+    truncate -s 0 $DEAD_MESSAGES
 }
 
 # Teardown
@@ -64,6 +67,10 @@
     grep -qe "^$1\$" $XE_CALLS
 }
 
+function assert_died_with {
+    diff -u <(echo "$1") $DEAD_MESSAGES
+}
+
 function mock_out {
     local FNNAME="$1"
     local OUTPUT="$2"
@@ -109,16 +116,22 @@
     grep "[ -d /usr/lib/xcp/plugins/ ]" $LIST_OF_ACTIONS
 }
 
-function test_zip_snapshot_location {
+function test_zip_snapshot_location_http {
     diff \
-    <(zip_snapshot_location "git://git.openstack.org/openstack/nova.git" "master") \
-    <(echo "git://git.openstack.org/openstack/nova/zipball/master")
+    <(zip_snapshot_location "http://github.com/openstack/nova.git" "master") \
+    <(echo "http://github.com/openstack/nova/zipball/master")
+}
+
+function test_zip_snapshot_location_git {
+    diff \
+    <(zip_snapshot_location "git://github.com/openstack/nova.git" "master") \
+    <(echo "http://github.com/openstack/nova/zipball/master")
 }
 
 function test_create_directory_for_kernels {
     (
         . mocks
-        mock_out get_local_sr uuid1
+        mock_out get_local_sr_path /var/run/sr-mount/uuid1
         create_directory_for_kernels
     )
 
@@ -141,7 +154,7 @@
 function test_create_directory_for_images {
     (
         . mocks
-        mock_out get_local_sr uuid1
+        mock_out get_local_sr_path /var/run/sr-mount/uuid1
         create_directory_for_images
     )
 
@@ -179,7 +192,7 @@
     local IGNORE
     IGNORE=$(. mocks && extract_remote_zipball "failurl")
 
-    assert_previous_command_failed
+    assert_died_with "Failed to download [failurl]"
 }
 
 function test_find_nova_plugins {
@@ -199,8 +212,7 @@
 
     [ "$RESULT" == "uuid123" ]
 
-    assert_xe_min
-    assert_xe_param "sr-list" "name-label=Local storage"
+    assert_xe_param "pool-list" params=default-SR minimal=true
 }
 
 function test_get_local_sr_path {
diff --git a/unstack.sh b/unstack.sh
index 77dbe07..31f6f01 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -30,18 +30,31 @@
     exit 1
 fi
 
+
+# Configure Projects
+# ==================
+
 # Import apache functions
 source $TOP_DIR/lib/apache
 
-# Get project function libraries
-source $TOP_DIR/lib/baremetal
-source $TOP_DIR/lib/cinder
+# Import TLS functions
+source $TOP_DIR/lib/tls
+
+# Source project function libraries
+source $TOP_DIR/lib/infra
+source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/stackforge
+source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
-source $TOP_DIR/lib/horizon
+source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
+source $TOP_DIR/lib/ceilometer
+source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron
+source $TOP_DIR/lib/baremetal
+source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/ironic
 source $TOP_DIR/lib/trove