Merge "Stop doing special things with setuptools"
diff --git a/README.md b/README.md
index 6426e9a..99e9838 100644
--- a/README.md
+++ b/README.md
@@ -12,10 +12,14 @@
 
 IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and may alter your networking configuration.  We strongly recommend that you run `stack.sh` in a clean and disposable vm when you are first getting started.
 
-# Devstack on Xenserver
+# DevStack on Xenserver
 
 If you would like to use Xenserver as the hypervisor, please refer to the instructions in `./tools/xen/README.md`.
 
+# DevStack on Docker
+
+If you would like to use Docker as the hypervisor, please refer to the instructions in `./tools/docker/README.md`.
+
 # Versions
 
 The devstack master branch generally points to trunk versions of OpenStack components.  For older, stable versions, look for branches named stable/[release] in the DevStack repo.  For example, you can do the following to create a diablo OpenStack cloud:
diff --git a/clean.sh b/clean.sh
index f7d15df..6ceb5a4 100755
--- a/clean.sh
+++ b/clean.sh
@@ -33,6 +33,7 @@
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
 
+source $TOP_DIR/lib/oslo
 source $TOP_DIR/lib/tls
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
@@ -64,6 +65,11 @@
 cleanup_neutron
 cleanup_swift
 
+# Do the hypervisor cleanup until this can be moved back into lib/nova
+if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    cleanup_nova_hypervisor
+fi
+
 # cinder doesn't always clean up the volume group as it might be used elsewhere...
 # clean it up if it is a loop device
 VG_DEV=$(sudo losetup -j $DATA_DIR/${VOLUME_GROUP}-backing-file | awk -F':' '/backing-file/ { print $1}')
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index a3a14eb..fe27bd0 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -44,6 +44,9 @@
 # the exercise is skipped
 is_service_enabled cinder || exit 55
 
+# Also skip if the hypervisor is Docker
+[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -174,7 +177,8 @@
 fi
 
 # Get the instance IP
-IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
+IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
+
 die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
diff --git a/exercises/docker.sh b/exercises/docker.sh
new file mode 100755
index 0000000..0672bc0
--- /dev/null
+++ b/exercises/docker.sh
@@ -0,0 +1,105 @@
+#!/usr/bin/env bash
+
+# **docker**
+
+# Test Docker hypervisor
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+# Skip if the hypervisor is not Docker
+[[ "$VIRT_DRIVER" == "docker" ]] || exit 55
+
+# Import docker functions and declarations
+source $TOP_DIR/lib/nova_plugins/hypervisor-docker
+
+# Image and flavor are ignored but the CLI requires them...
+
+# Instance type to create
+DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
+
+# Boot this image, use first AMI image if unset
+DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
+
+# Instance name
+VM_NAME=ex-docker
+
+
+# Launching a server
+# ==================
+
+# Grab the id of the image to launch
+IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME:latest " | get_field 1)
+die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME"
+
+# Select a flavor
+INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
+if [[ -z "$INSTANCE_TYPE" ]]; then
+    # grab the first flavor in the list to launch if default doesn't exist
+    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
+fi
+
+# Clean-up from previous runs
+nova delete $VM_NAME || true
+if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
+    die $LINENO "server didn't terminate!"
+fi
+
+# Boot instance
+# -------------
+
+VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE $VM_NAME | grep ' id ' | get_field 2)
+die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
+
+# Check that the status is active within ACTIVE_TIMEOUT seconds
+if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
+    die $LINENO "server didn't become active!"
+fi
+
+# Get the instance IP
+IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
+die_if_not_set $LINENO IP "Failure retrieving IP address"
+
+# Private IPs can be pinged in single node deployments
+ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
+
+# Clean up
+# --------
+
+# Delete instance
+nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    die $LINENO "Server $VM_NAME not deleted"
+fi
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
+
diff --git a/exercises/euca.sh b/exercises/euca.sh
index b8b283a..64c0014 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -41,6 +41,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# Skip if the hypervisor is Docker
+[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index ac65cf7..2833b65 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -38,6 +38,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# Skip if the hypervisor is Docker
+[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -132,7 +135,7 @@
 fi
 
 # Get the instance IP
-IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
+IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
 die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 4367e2e..abb29cf 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -272,12 +272,12 @@
 }
 
 function ping_ip {
-   # Test agent connection.  Assumes namespaces are disabled, and
-   # that DHCP is in use, but not L3
-   local VM_NAME=$1
-   local NET_NAME=$2
-   IP=`nova show $VM_NAME | grep 'network' | awk '{print $5}'`
-   ping_check $NET_NAME $IP $BOOT_TIMEOUT
+    # Test agent connection.  Assumes namespaces are disabled, and
+    # that DHCP is in use, but not L3
+    local VM_NAME=$1
+    local NET_NAME=$2
+    IP=$(get_instance_ip $VM_NAME $NET_NAME)
+    ping_check $NET_NAME $IP $BOOT_TIMEOUT
 }
 
 function check_vm {
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index 6b67291..7d80570 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -33,6 +33,9 @@
 # Import exercise configuration
 source $TOP_DIR/exerciserc
 
+# Skip if the hypervisor is Docker
+[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+
 
 # Testing Security Groups
 # =======================
diff --git a/exercises/trove.sh b/exercises/trove.sh
new file mode 100755
index 0000000..d48d5fe
--- /dev/null
+++ b/exercises/trove.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# **trove.sh**
+
+# Sanity check that trove started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled trove || exit 55
+
+# can we get a list of versions
+curl http://$SERVICE_HOST:8779/ 2>/dev/null | grep -q 'versions' || die $LINENO "Trove API not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
+
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index b2b391c..e536d16 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -42,6 +42,9 @@
 # exercise is skipped.
 is_service_enabled cinder || exit 55
 
+# Also skip if the hypervisor is Docker
+[[ "$VIRT_DRIVER" == "docker" ]] && exit 55
+
 # Instance type to create
 DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
 
@@ -135,7 +138,8 @@
 fi
 
 # Get the instance IP
-IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
+IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)
+
 die_if_not_set $LINENO IP "Failure retrieving IP address"
 
 # Private IPs can be pinged in single node deployments
diff --git a/files/apts/cinder b/files/apts/cinder
index c45b97f..f8e3b6d 100644
--- a/files/apts/cinder
+++ b/files/apts/cinder
@@ -1,3 +1,7 @@
 tgt
 lvm2
 qemu-utils
+libpq-dev
+python-dev
+open-iscsi
+open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise
diff --git a/files/apts/glance b/files/apts/glance
index a05e9f2..26826a5 100644
--- a/files/apts/glance
+++ b/files/apts/glance
@@ -1,5 +1,10 @@
 gcc
+libffi-dev          # testonly
+libmysqlclient-dev  # testonly
+libpq-dev           # testonly
+libssl-dev          # testonly
 libxml2-dev
+libxslt1-dev        # testonly
 python-dev
 python-eventlet
 python-routes
@@ -10,3 +15,4 @@
 python-pastedeploy
 python-xattr
 python-iso8601
+zlib1g-dev           # testonly
diff --git a/files/apts/trove b/files/apts/trove
new file mode 100644
index 0000000..09dcee8
--- /dev/null
+++ b/files/apts/trove
@@ -0,0 +1 @@
+libxslt1-dev   # testonly
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index 1ecf890..277904a 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -24,6 +24,12 @@
 catalog.RegionOne.volume.name = Volume Service
 
 
+catalog.RegionOne.volumev2.publicURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
+catalog.RegionOne.volumev2.adminURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
+catalog.RegionOne.volumev2.internalURL = http://%SERVICE_HOST%:8776/v2/$(tenant_id)s
+catalog.RegionOne.volumev2.name = Volume Service V2
+
+
 catalog.RegionOne.ec2.publicURL = http://%SERVICE_HOST%:8773/services/Cloud
 catalog.RegionOne.ec2.adminURL = http://%SERVICE_HOST%:8773/services/Admin
 catalog.RegionOne.ec2.internalURL = http://%SERVICE_HOST%:8773/services/Cloud
diff --git a/files/keystone_data.sh b/files/keystone_data.sh
index 45f9c81..3f3137c 100755
--- a/files/keystone_data.sh
+++ b/files/keystone_data.sh
@@ -58,9 +58,9 @@
     # Nova needs ResellerAdmin role to download images when accessing
     # swift through the s3 api.
     keystone user-role-add \
-        --tenant_id $SERVICE_TENANT \
-        --user_id $NOVA_USER \
-        --role_id $RESELLER_ROLE
+        --tenant-id $SERVICE_TENANT \
+        --user-id $NOVA_USER \
+        --role-id $RESELLER_ROLE
 fi
 
 # Heat
@@ -69,9 +69,9 @@
                                               --pass="$SERVICE_PASSWORD" \
                                               --tenant_id $SERVICE_TENANT \
                                               --email=heat@example.com)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $HEAT_USER \
-                           --role_id $SERVICE_ROLE
+    keystone user-role-add --tenant-id $SERVICE_TENANT \
+                           --user-id $HEAT_USER \
+                           --role-id $SERVICE_ROLE
     # heat_stack_user role is for users created by Heat
     keystone role-create --name heat_stack_user
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
@@ -106,9 +106,9 @@
         --tenant_id $SERVICE_TENANT \
         --email=glance@example.com)
     keystone user-role-add \
-        --tenant_id $SERVICE_TENANT \
-        --user_id $GLANCE_USER \
-        --role_id $ADMIN_ROLE
+        --tenant-id $SERVICE_TENANT \
+        --user-id $GLANCE_USER \
+        --role-id $ADMIN_ROLE
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         GLANCE_SERVICE=$(get_id keystone service-create \
             --name=glance \
@@ -129,13 +129,13 @@
                                               --pass="$SERVICE_PASSWORD" \
                                               --tenant_id $SERVICE_TENANT \
                                               --email=ceilometer@example.com)
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $CEILOMETER_USER \
-                           --role_id $ADMIN_ROLE
+    keystone user-role-add --tenant-id $SERVICE_TENANT \
+                           --user-id $CEILOMETER_USER \
+                           --role-id $ADMIN_ROLE
     # Ceilometer needs ResellerAdmin role to access swift account stats.
-    keystone user-role-add --tenant_id $SERVICE_TENANT \
-                           --user_id $CEILOMETER_USER \
-                           --role_id $RESELLER_ROLE
+    keystone user-role-add --tenant-id $SERVICE_TENANT \
+                           --user-id $CEILOMETER_USER \
+                           --role-id $RESELLER_ROLE
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         CEILOMETER_SERVICE=$(get_id keystone service-create \
             --name=ceilometer \
@@ -192,7 +192,7 @@
         --pass="$ADMIN_PASSWORD" \
         --email=alt_demo@example.com)
     keystone user-role-add \
-        --tenant_id $ALT_DEMO_TENANT \
-        --user_id $ALT_DEMO_USER \
-        --role_id $MEMBER_ROLE
+        --tenant-id $ALT_DEMO_TENANT \
+        --user-id $ALT_DEMO_USER \
+        --role-id $MEMBER_ROLE
 fi
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
index 8f4a5a7..55078da 100644
--- a/files/rpms-suse/cinder
+++ b/files/rpms-suse/cinder
@@ -1,3 +1,6 @@
 lvm2
 tgt
 qemu-tools
+python-devel
+postgresql-devel
+open-iscsi
diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove
new file mode 100644
index 0000000..09dcee8
--- /dev/null
+++ b/files/rpms-suse/trove
@@ -0,0 +1 @@
+libxslt-devel   # testonly
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 19dedff..c4edb68 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,3 +1,6 @@
 lvm2
 scsi-target-utils
 qemu-img
+python-devel
+postgresql-devel
+iscsi-initiator-utils
diff --git a/files/rpms/glance b/files/rpms/glance
index 0f113ea..dd66171 100644
--- a/files/rpms/glance
+++ b/files/rpms/glance
@@ -1,5 +1,10 @@
 gcc
+libffi-devel        # testonly
 libxml2-devel
+libxslt-devel       # testonly
+mysql-devel         # testonly
+openssl-devel       # testonly
+postgresql-devel    # testonly
 python-argparse
 python-devel
 python-eventlet
@@ -9,3 +14,4 @@
 python-sqlalchemy
 python-wsgiref
 pyxattr
+zlib-devel          # testonly
diff --git a/files/rpms/trove b/files/rpms/trove
new file mode 100644
index 0000000..09dcee8
--- /dev/null
+++ b/files/rpms/trove
@@ -0,0 +1 @@
+libxslt-devel   # testonly
diff --git a/files/sources.list b/files/sources.list
deleted file mode 100644
index 77a1bfb..0000000
--- a/files/sources.list
+++ /dev/null
@@ -1,9 +0,0 @@
-deb http://mirror.rackspace.com/ubuntu/ %DIST% main restricted
-deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates main restricted
-deb http://mirror.rackspace.com/ubuntu/ %DIST% universe
-deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates universe
-deb http://mirror.rackspace.com/ubuntu/ %DIST% multiverse
-deb http://mirror.rackspace.com/ubuntu/ %DIST%-updates multiverse
-deb http://security.ubuntu.com/ubuntu %DIST%-security main restricted
-deb http://security.ubuntu.com/ubuntu %DIST%-security universe
-deb http://security.ubuntu.com/ubuntu %DIST%-security multiverse
diff --git a/functions b/functions
index e9c6061..f996ba8 100644
--- a/functions
+++ b/functions
@@ -317,16 +317,36 @@
                 continue
             fi
 
+            # Assume we want this package
+            package=${line%#*}
+            inst_pkg=1
+
+            # Look for # dist:xxx in comment
             if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then
                 # We are using BASH regexp matching feature.
                 package=${BASH_REMATCH[1]}
                 distros=${BASH_REMATCH[2]}
                 # In bash ${VAR,,} will lowercase VAR
-                [[ ${distros,,} =~ ${DISTRO,,} ]] && echo $package
-                continue
+                # Look for a match in the distro list
+                if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
+                    # If no match then skip this package
+                    inst_pkg=0
+                fi
             fi
 
-            echo ${line%#*}
+            # Look for # testonly in comment
+            if [[ $line =~ (.*)#.*testonly.* ]]; then
+                package=${BASH_REMATCH[1]}
+                # Are we installing test packages? (test for the default value)
+                if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then
+                    # If not installing test packages then skip this package
+                    inst_pkg=0
+                fi
+            fi
+
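+            # Examples from the package lists in this change:
+            #   "open-iscsi-utils # Deprecated since quantal dist:lucid,oneiric,precise"
+            #   is emitted only when DISTRO matches one of the listed releases;
+            #   "libxslt1-dev   # testonly" is emitted only when
+            #   INSTALL_TESTONLY_PACKAGES is True.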
+            if [[ $inst_pkg = 1 ]]; then
+                echo $package
+            fi
         done
         IFS=$OIFS
     done
@@ -387,8 +407,9 @@
         # CentOS release 5.5 (Final)
         # CentOS Linux release 6.0 (Final)
         # Fedora release 16 (Verne)
+        # XenServer release 6.2.0-70446c (xenenterprise)
         os_CODENAME=""
-        for r in "Red Hat" CentOS Fedora; do
+        for r in "Red Hat" CentOS Fedora XenServer; do
             os_VENDOR=$r
             if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then
                 ver=`sed -e 's/^.* \(.*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release`
@@ -451,6 +472,8 @@
     elif [[ "$os_VENDOR" =~ (Red Hat) || "$os_VENDOR" =~ (CentOS) ]]; then
         # Drop the . release as we assume it's compatible
         DISTRO="rhel${os_RELEASE::1}"
+    elif [[ "$os_VENDOR" =~ (XenServer) ]]; then
+        DISTRO="xs$os_RELEASE"
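+        # e.g. "XenServer release 6.2.0-70446c (xenenterprise)" above should
+        # yield DISTRO=xs6.2.0-70446c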
     else
         # Catch-all for now is Vendor + Release + Update
         DISTRO="$os_VENDOR-$os_RELEASE.$os_UPDATE"
@@ -756,6 +779,7 @@
 #   **glance** returns true if any service enabled start with **g-**
 #   **neutron** returns true if any service enabled start with **q-**
 #   **swift** returns true if any service enabled start with **s-**
+#   **trove** returns true if any service enabled start with **tr-**
 #   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
 #   **s-** services will be enabled. This will be deprecated in the future.
 #
@@ -775,6 +799,7 @@
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
         [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
+        [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
         [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
         [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && return 0
     done
@@ -909,7 +934,7 @@
     fi
 
     if [[ "$os_PACKAGE" = "deb" ]]; then
-        dpkg -l "$@" > /dev/null
+        dpkg -s "$@" > /dev/null 2> /dev/null
     elif [[ "$os_PACKAGE" = "rpm" ]]; then
         rpm --quiet -q "$@"
     else
@@ -944,13 +969,9 @@
         CMD_PIP=$(get_pip_command)
     fi
 
-    if is_fedora && [[ $DISTRO =~ (rhel6) ]]; then
-        # RHEL6 pip by default doesn't have this (was introduced
-        # around 0.8.1 or so)
-        PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
-    else
-        PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-True}
-    fi
+    # The mirror option is not needed anymore because pypi has a CDN available,
+    # but it can still be useful in certain circumstances
+    PIP_USE_MIRRORS=${PIP_USE_MIRRORS:-False}
     if [[ "$PIP_USE_MIRRORS" != "False" ]]; then
         PIP_MIRROR_OPT="--use-mirrors"
     fi
@@ -1064,7 +1085,7 @@
             sleep 1.5
 
             NL=`echo -ne '\015'`
-            screen -S $SCREEN_NAME -p $1 -X stuff "$2 || touch \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
+            screen -S $SCREEN_NAME -p $1 -X stuff "$2 || echo \"$1 failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/$1.failure\"$NL"
         else
             # Spawn directly without screen
             run_process "$1" "$2" >$SERVICE_DIR/$SCREEN_NAME/$service.pid
@@ -1237,11 +1258,29 @@
     if [[ "$image_url" =~ '.vmdk' ]]; then
         IMAGE="$FILES/${IMAGE_FNAME}"
         IMAGE_NAME="${IMAGE_FNAME%.vmdk}"
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="preallocated" < "${IMAGE}"
+
+        # Before we can upload vmdk type images to glance, we need to know its
+        # disk type, storage adapter, and networking adapter. These values are
+        # passed to glance as custom properties. We take these values from the
+        # vmdk filename, which is expected in the following format:
+        #
+        #     <name>-<disk type>:<storage adapter>:<network adapter>
+        #
+        # If the filename does not follow the above format then the vsphere
+        # driver will supply default values.
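+        #
+        # For example, a hypothetical name such as
+        #     ubuntu-server-sparse:lsiLogic:e1000
+        # should give vmdk_disktype=sparse, vmdk_adapter_type=lsiLogic and
+        # vmdk_net_adapter=e1000.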
+        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'`
+        if [[ ! -z "$property_string" ]]; then
+            IFS=':' read -a props <<< "$property_string"
+            vmdk_disktype="${props[0]}"
+            vmdk_adapter_type="${props[1]}"
+            vmdk_net_adapter="${props[2]}"
+        fi
+
+        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}"
         return
     fi
 
-    # XenServer-ovf-format images are provided as .vhd.tgz as well
+    # XenServer-vhd-ovf-format images are provided as .vhd.tgz
     # and should not be decompressed prior to loading
     if [[ "$image_url" =~ '.vhd.tgz' ]]; then
         IMAGE="$FILES/${IMAGE_FNAME}"
@@ -1250,6 +1289,22 @@
         return
     fi
 
+    # .xen-raw.tgz suggests a Xen capable raw image inside a tgz.
+    # and should not be decompressed prior to loading.
+    # Setting metadata, so PV mode is used.
+    if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
+        IMAGE="$FILES/${IMAGE_FNAME}"
+        IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}"
+        glance \
+          --os-auth-token $token \
+          --os-image-url http://$GLANCE_HOSTPORT \
+          image-create \
+            --name "$IMAGE_NAME" --is-public=True \
+            --container-format=tgz --disk-format=raw \
+            --property vm_mode=xen < "${IMAGE}"
+        return
+    fi
+
     KERNEL=""
     RAMDISK=""
     DISK_FORMAT=""
@@ -1419,7 +1474,6 @@
     local check_command=""
     MULTI_HOST=`trueorfalse False $MULTI_HOST`
     if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
-        sleep $boot_timeout
         return
     fi
     if [[ "$expected" = "True" ]]; then
@@ -1437,6 +1491,19 @@
     fi
 }
 
+# Get ip of instance
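+# e.g. (as used in the exercises above): IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME)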
+function get_instance_ip() {
+    local vm_id=$1
+    local network_name=$2
+    local nova_result="$(nova show $vm_id)"
+    local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+    if [[ $ip = "" ]];then
+        echo "$nova_result"
+        die $LINENO "[Fail] Coudn't get ipaddress of VM"
+        exit 1
+    fi
+    echo $ip
+}
 
 # ssh check
 
@@ -1597,6 +1664,37 @@
 }
 
 
+# ``policy_add policy_file policy_name policy_permissions``
+#
+# Add a policy to a policy.json file
+# Do nothing if the policy already exists
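+#
+# A hypothetical example (policy name and rule are illustrative only):
+#   policy_add $NOVA_CONF_DIR/policy.json "compute_extension:fake_ext" "[[\"rule:admin_api\"]]"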
+
+function policy_add() {
+    local policy_file=$1
+    local policy_name=$2
+    local policy_perm=$3
+
+    if grep -q ${policy_name} ${policy_file}; then
+        echo "Policy ${policy_name} already exists in ${policy_file}"
+        return
+    fi
+
+    # Add a terminating comma to policy lines without one
+    # Remove the closing '}' and all lines following to the end-of-file
+    local tmpfile=$(mktemp)
+    uniq ${policy_file} | sed -e '
+        s/]$/],/
+        /^[}]/,$d
+    ' > ${tmpfile}
+
+    # Append policy and closing brace
+    echo "    \"${policy_name}\": ${policy_perm}" >>${tmpfile}
+    echo "}" >>${tmpfile}
+
+    mv ${tmpfile} ${policy_file}
+}
+
+
 # Restore xtrace
 $XTRACE
 
diff --git a/lib/baremetal b/lib/baremetal
index 8f6c3f1..52af420 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -215,7 +215,16 @@
     # ensure /tftpboot is prepared
     sudo mkdir -p /tftpboot
     sudo mkdir -p /tftpboot/pxelinux.cfg
-    sudo cp /usr/lib/syslinux/pxelinux.0 /tftpboot/
+
+    PXEBIN=/usr/share/syslinux/pxelinux.0
+    if [ ! -f $PXEBIN ]; then
+        PXEBIN=/usr/lib/syslinux/pxelinux.0
+        if [ ! -f $PXEBIN ]; then
+            die $LINENO "pxelinux.0 (from SYSLINUX) not found."
+        fi
+    fi
+
+    sudo cp $PXEBIN /tftpboot/
     sudo chown -R $STACK_USER:$LIBVIRT_GROUP /tftpboot
 
     # ensure $NOVA_STATE_PATH/baremetal is prepared
@@ -291,7 +300,7 @@
     out=$($BM_IMAGE_BUILD_DIR/bin/disk-image-get-kernel \
             -x -d $TOP_DIR/files -o bm-deploy -i $file)
     if [ $? -ne 0 ]; then
-        die "Failed to get kernel and ramdisk from $file"
+        die $LINENO "Failed to get kernel and ramdisk from $file"
     fi
     XTRACE=$(set +o | grep xtrace)
     set +o xtrace
@@ -439,9 +448,9 @@
        "$BM_FLAVOR_ROOT_DISK" \
        "$mac_1" \
        | grep ' id ' | get_field 2 )
-    [ $? -eq 0 ] || [ "$id" ] || die "Error adding baremetal node"
+    [ $? -eq 0 ] || [ "$id" ] || die $LINENO "Error adding baremetal node"
     id2=$(nova baremetal-interface-add "$id" "$mac_2" )
-    [ $? -eq 0 ] || [ "$id2" ] || die "Error adding interface to barmetal node $id"
+    [ $? -eq 0 ] || [ "$id2" ] || die $LINENO "Error adding interface to barmetal node $id"
 }
 
 
diff --git a/lib/ceilometer b/lib/ceilometer
index 8768122..2afbc88 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -5,7 +5,7 @@
 #   enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
 #
 # To ensure Ceilometer alarming services are enabled also, further add to the localrc:
-#   enable_service ceilometer-alarm-notify ceilometer-alarm-eval
+#   enable_service ceilometer-alarm-notifier ceilometer-alarm-singleton
 
 # Dependencies:
 # - functions
@@ -43,7 +43,7 @@
 CEILOMETER_BIN_DIR=$(get_python_exec_prefix)
 
 # Set up database backend
-CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mongodb}
+CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql}
 
 # Functions
 # ---------
@@ -138,14 +138,14 @@
     screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
     screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
     screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-alarm-notify "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
-    screen_it ceilometer-alarm-eval "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-alarm-singleton "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF"
 }
 
 # stop_ceilometer() - Stop running processes
 function stop_ceilometer() {
     # Kill the ceilometer screen windows
-    for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notify ceilometer-alarm-eval; do
+    for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-singleton; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
 }
diff --git a/lib/cinder b/lib/cinder
index 3472dcd..7f1544b 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -212,7 +212,7 @@
 
     cp $CINDER_DIR/etc/cinder/cinder.conf.sample $CINDER_CONF
     iniset $CINDER_CONF DEFAULT auth_strategy keystone
-    iniset $CINDER_CONF DEFAULT debug True
+    iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $CINDER_CONF DEFAULT verbose True
     if [ "$CINDER_MULTI_LVM_BACKEND" = "True" ]; then
         iniset $CINDER_CONF DEFAULT enabled_backends lvmdriver-1,lvmdriver-2
@@ -226,6 +226,7 @@
         iniset $CINDER_CONF DEFAULT volume_group $VOLUME_GROUP
         iniset $CINDER_CONF DEFAULT volume_name_template ${VOLUME_NAME_PREFIX}%s
     fi
+    iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST"
     iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm
     iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder`
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
@@ -234,6 +235,10 @@
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
 
+    if is_service_enabled ceilometer; then
+        iniset $CINDER_CONF DEFAULT notification_driver "cinder.openstack.common.notifier.rpc_notifier"
+    fi
+
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
         iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
@@ -283,6 +288,12 @@
             CINDER_GLUSTERFS_SHARES=$(echo $CINDER_GLUSTERFS_SHARES | tr ";" "\n")
             echo "$CINDER_GLUSTERFS_SHARES" > $CINDER_CONF_DIR/glusterfs_shares
         fi
+    elif [ "$CINDER_DRIVER" == "vsphere" ]; then
+        echo_summary "Using VMware vCenter driver"
+        iniset $CINDER_CONF DEFAULT vmware_host_ip "$VMWAREAPI_IP"
+        iniset $CINDER_CONF DEFAULT vmware_host_username "$VMWAREAPI_USER"
+        iniset $CINDER_CONF DEFAULT vmware_host_password "$VMWAREAPI_PASSWORD"
+        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver"
     fi
 
     if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
@@ -324,9 +335,9 @@
             --email=cinder@example.com \
             | grep " id " | get_field 2)
         keystone user-role-add \
-            --tenant_id $SERVICE_TENANT \
-            --user_id $CINDER_USER \
-            --role_id $ADMIN_ROLE
+            --tenant-id $SERVICE_TENANT \
+            --user-id $CINDER_USER \
+            --role-id $ADMIN_ROLE
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
             CINDER_SERVICE=$(keystone service-create \
                 --name=cinder \
@@ -339,6 +350,18 @@
                 --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \
                 --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s"
+            CINDER_V2_SERVICE=$(keystone service-create \
+                --name=cinder \
+                --type=volumev2 \
+                --description="Cinder Volume Service V2" \
+                | grep " id " | get_field 2)
+            keystone endpoint-create \
+                --region RegionOne \
+                --service_id $CINDER_V2_SERVICE \
+                --publicurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
+                --adminurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \
+                --internalurl "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s"
+
         fi
     fi
 }
@@ -444,6 +467,7 @@
 function install_cinderclient() {
     git_clone $CINDERCLIENT_REPO $CINDERCLIENT_DIR $CINDERCLIENT_BRANCH
     setup_develop $CINDERCLIENT_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$CINDERCLIENT_DIR/tools/,/etc/bash_completion.d/}cinder.bash_completion
 }
 
 # apply config.d approach for cinder volumes directory
diff --git a/lib/glance b/lib/glance
index 583f879..64d8b06 100644
--- a/lib/glance
+++ b/lib/glance
@@ -71,7 +71,7 @@
 
     # Copy over our glance configurations and update them
     cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF
-    iniset $GLANCE_REGISTRY_CONF DEFAULT debug True
+    iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
     local dburl=`database_connection_url glance`
     iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl
@@ -87,7 +87,7 @@
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/registry
 
     cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
-    iniset $GLANCE_API_CONF DEFAULT debug True
+    iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     inicomment $GLANCE_API_CONF DEFAULT log_file
     iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
@@ -108,6 +108,10 @@
     fi
     iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT
     iniset $GLANCE_API_CONF keystone_authtoken signing_dir $GLANCE_AUTH_CACHE_DIR/api
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz"
+        iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso"
+    fi
 
     # Store the images in swift if enabled.
     if is_service_enabled s-proxy; then
@@ -123,7 +127,7 @@
     cp -p $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
 
     cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF
-    iniset $GLANCE_CACHE_CONF DEFAULT debug True
+    iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     inicomment $GLANCE_CACHE_CONF DEFAULT log_file
     iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
diff --git a/lib/heat b/lib/heat
index 1b715f2..afa0eeb 100644
--- a/lib/heat
+++ b/lib/heat
@@ -31,6 +31,10 @@
 HEATCLIENT_DIR=$DEST/python-heatclient
 HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
 HEAT_STANDALONE=`trueorfalse False $HEAT_STANDALONE`
+HEAT_CONF_DIR=/etc/heat
+HEAT_CONF=$HEAT_CONF_DIR/heat.conf
+HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
+HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
 
 # Functions
 # ---------
@@ -39,17 +43,20 @@
 # runs that a clean run would need to clean up
 function cleanup_heat() {
     sudo rm -rf $HEAT_AUTH_CACHE_DIR
+    sudo rm -rf $HEAT_ENV_DIR
+    sudo rm -rf $HEAT_TEMPLATES_DIR
 }
 
 # configure_heat() - Set config files, create data dirs, etc
 function configure_heat() {
     setup_develop $HEAT_DIR
 
-    HEAT_CONF_DIR=/etc/heat
     if [[ ! -d $HEAT_CONF_DIR ]]; then
         sudo mkdir -p $HEAT_CONF_DIR
     fi
     sudo chown $STACK_USER $HEAT_CONF_DIR
+    # remove old config files
+    rm -f $HEAT_CONF_DIR/heat-*.conf
 
     HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$SERVICE_HOST}
     HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000}
@@ -64,88 +71,70 @@
 
     cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE
     cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE
+    cp $HEAT_DIR/etc/heat/heat.conf.sample $HEAT_CONF
 
-    # Cloudformation API
-    HEAT_API_CFN_CONF=$HEAT_CONF_DIR/heat-api-cfn.conf
-    cp $HEAT_DIR/etc/heat/heat-api-cfn.conf $HEAT_API_CFN_CONF
-    iniset $HEAT_API_CFN_CONF DEFAULT debug True
-    inicomment $HEAT_API_CFN_CONF DEFAULT log_file
-    iniset $HEAT_API_CFN_CONF DEFAULT use_syslog $SYSLOG
-    iniset $HEAT_API_CFN_CONF DEFAULT bind_host $HEAT_API_CFN_HOST
-    iniset $HEAT_API_CFN_CONF DEFAULT bind_port $HEAT_API_CFN_PORT
-    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $HEAT_API_CFN_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CFN_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_API_CFN_CONF keystone_authtoken admin_user heat
-    iniset $HEAT_API_CFN_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_API_CFN_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cfn
-    iniset $HEAT_API_CFN_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CFN_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
-    [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CFN_CONF paste_deploy flavor standalone
+    # common options
+    iniset_rpc_backend heat $HEAT_CONF DEFAULT
+    iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
+    iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
+    iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
+    iniset $HEAT_CONF DEFAULT sql_connection `database_connection_url heat`
+    iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random`
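+    # (hexdump above reads 16 random bytes and emits them as a 32-character
+    # hex string, which is used as the encryption key)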
 
-    iniset_rpc_backend heat $HEAT_API_CFN_CONF DEFAULT
+    # logging
+    iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG
+    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+        # Add color to logging output
+        iniset $HEAT_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s%(color)s] %(instance)s%(color)s%(message)s"
+        iniset $HEAT_CONF DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
+        iniset $HEAT_CONF DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
+        iniset $HEAT_CONF DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
+    fi
+
+    # keystone authtoken
+    iniset $HEAT_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $HEAT_CONF keystone_authtoken admin_user heat
+    iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
+    iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR
+
+    # ec2authtoken
+    iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
+
+    # paste_deploy
+    [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_CONF paste_deploy flavor standalone
 
     # OpenStack API
-    HEAT_API_CONF=$HEAT_CONF_DIR/heat-api.conf
-    cp $HEAT_DIR/etc/heat/heat-api.conf $HEAT_API_CONF
-    iniset $HEAT_API_CONF DEFAULT debug True
-    inicomment $HEAT_API_CONF DEFAULT log_file
-    iniset $HEAT_API_CONF DEFAULT use_syslog $SYSLOG
-    iniset $HEAT_API_CONF DEFAULT bind_host $HEAT_API_HOST
-    iniset $HEAT_API_CONF DEFAULT bind_port $HEAT_API_PORT
-    iniset $HEAT_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $HEAT_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $HEAT_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $HEAT_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_API_CONF keystone_authtoken admin_user heat
-    iniset $HEAT_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_API_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api
-    iniset $HEAT_API_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
-    [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CONF paste_deploy flavor standalone
-    iniset_rpc_backend heat $HEAT_API_CONF DEFAULT
+    iniset $HEAT_CONF heat_api bind_host $HEAT_API_HOST
+    iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT
 
-
-    # engine
-    HEAT_ENGINE_CONF=$HEAT_CONF_DIR/heat-engine.conf
-    cp $HEAT_DIR/etc/heat/heat-engine.conf $HEAT_ENGINE_CONF
-    iniset $HEAT_ENGINE_CONF DEFAULT debug True
-    inicomment $HEAT_ENGINE_CONF DEFAULT log_file
-    iniset $HEAT_ENGINE_CONF DEFAULT use_syslog $SYSLOG
-    iniset $HEAT_ENGINE_CONF DEFAULT bind_host $HEAT_ENGINE_HOST
-    iniset $HEAT_ENGINE_CONF DEFAULT bind_port $HEAT_ENGINE_PORT
-    iniset $HEAT_ENGINE_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
-    iniset $HEAT_ENGINE_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
-    iniset $HEAT_ENGINE_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
-    iniset $HEAT_ENGINE_CONF DEFAULT sql_connection `database_connection_url heat`
-    iniset $HEAT_ENGINE_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random`
-
-    iniset_rpc_backend heat $HEAT_ENGINE_CONF DEFAULT
+    # Cloudformation API
+    iniset $HEAT_CONF heat_api_cfn bind_host $HEAT_API_CFN_HOST
+    iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT
 
     # Cloudwatch API
-    HEAT_API_CW_CONF=$HEAT_CONF_DIR/heat-api-cloudwatch.conf
-    cp $HEAT_DIR/etc/heat/heat-api-cloudwatch.conf $HEAT_API_CW_CONF
-    iniset $HEAT_API_CW_CONF DEFAULT debug True
-    inicomment $HEAT_API_CW_CONF DEFAULT log_file
-    iniset $HEAT_API_CW_CONF DEFAULT use_syslog $SYSLOG
-    iniset $HEAT_API_CW_CONF DEFAULT bind_host $HEAT_API_CW_HOST
-    iniset $HEAT_API_CW_CONF DEFAULT bind_port $HEAT_API_CW_PORT
-    iniset $HEAT_API_CW_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $HEAT_API_CW_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $HEAT_API_CW_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $HEAT_API_CW_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CW_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $HEAT_API_CW_CONF keystone_authtoken admin_user heat
-    iniset $HEAT_API_CW_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $HEAT_API_CW_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR/api-cloudwatch
-    iniset $HEAT_API_CW_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
-    iniset $HEAT_API_CW_CONF ec2authtoken keystone_ec2_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
-    [[ "$HEAT_STANDALONE" = "True" ]] && iniset $HEAT_API_CW_CONF paste_deploy flavor standalone
+    iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST
+    iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
 
-    iniset_rpc_backend heat $HEAT_API_CW_CONF DEFAULT
+    # Set limits to match tempest defaults
+    iniset $HEAT_CONF DEFAULT max_template_size 10240
+
+    # heat environment
+    sudo mkdir -p $HEAT_ENV_DIR
+    sudo chown $STACK_USER $HEAT_ENV_DIR
+    # copy the default environment
+    cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/
+
+    # heat template resources.
+    sudo mkdir -p $HEAT_TEMPLATES_DIR
+    sudo chown $STACK_USER $HEAT_TEMPLATES_DIR
+    # copy the default templates
+    cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/
 
 }
 
@@ -162,12 +151,8 @@
 # create_heat_cache_dir() - Part of the init_heat() process
 function create_heat_cache_dir() {
     # Create cache dirs
-    sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api
-    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api
-    sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cfn
-    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cfn
-    sudo mkdir -p $HEAT_AUTH_CACHE_DIR/api-cloudwatch
-    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR/api-cloudwatch
+    sudo mkdir -p $HEAT_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR
 }
 
 # install_heatclient() - Collect source and prepare
@@ -183,10 +168,10 @@
 
 # start_heat() - Start running processes, including screen
 function start_heat() {
-    screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF_DIR/heat-engine.conf"
-    screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-dir=$HEAT_CONF_DIR/heat-api.conf"
-    screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-dir=$HEAT_CONF_DIR/heat-api-cfn.conf"
-    screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-dir=$HEAT_CONF_DIR/heat-api-cloudwatch.conf"
+    screen_it h-eng "cd $HEAT_DIR; bin/heat-engine --config-file=$HEAT_CONF"
+    screen_it h-api "cd $HEAT_DIR; bin/heat-api --config-file=$HEAT_CONF"
+    screen_it h-api-cfn "cd $HEAT_DIR; bin/heat-api-cfn --config-file=$HEAT_CONF"
+    screen_it h-api-cw "cd $HEAT_DIR; bin/heat-api-cloudwatch --config-file=$HEAT_CONF"
 }
 
 # stop_heat() - Stop running processes
@@ -197,6 +182,21 @@
     done
 }
 
+function disk_image_create {
+    local elements_path=$1
+    local elements=$2
+    local arch=$3
+    local output=$TOP_DIR/files/$4
+    if [[ -f "$output.qcow2" ]];
+    then
+        echo "Image file already exists: $output_file"
+    else
+        ELEMENTS_PATH=$elements_path disk-image-create \
+            $elements -a $arch -o $output
+    fi
+    # upload with fake URL so that image in $TOP_DIR/files is used
+    upload_image "http://localhost/$output.qcow2" $TOKEN
+}
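+# A hypothetical disk_image_create call (element path, element names and image
+# name are illustrative only):
+#   disk_image_create /path/to/elements "vm fedora" amd64 heat-test-image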
 
 # Restore xtrace
 $XTRACE
diff --git a/lib/horizon b/lib/horizon
index 89bd659..e55bc15 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -50,7 +50,7 @@
         if [ -n "$line" ]; then
             sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
         else
-            sed -i -e "/^$section/ a\n    '$option': $value,\n" $file
+            sed -i -e "/^$section/a\    '$option': $value," $file
         fi
     else
         echo -e "\n\n$section = {\n    '$option': $value,\n}" >> $file
@@ -96,6 +96,16 @@
         _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True
     fi
 
+    # enable firewall dashboard in case service is enabled
+    if is_service_enabled q-fwaas; then
+        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_firewall True
+    fi
+
+    # enable VPN dashboard in case service is enabled
+    if is_service_enabled q-vpn; then
+        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_vpn True
+    fi
+
     # Initialize the horizon database (it stores sessions and notices shown to
     # users).  The user system is external (keystone).
     cd $HORIZON_DIR
@@ -106,13 +116,13 @@
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
     HORIZON_REQUIRE=''
-    local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon
+    local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf
     if is_ubuntu; then
         # Clean up the old config name
         sudo rm -f /etc/apache2/sites-enabled/000-default
         # Be a good citizen and use the distro tools here
         sudo touch $horizon_conf
-        sudo a2ensite horizon
+        sudo a2ensite horizon.conf
         # WSGI isn't enabled by default, enable it
         sudo a2enmod wsgi
     elif is_fedora; then
diff --git a/lib/ironic b/lib/ironic
new file mode 100644
index 0000000..2ce5038
--- /dev/null
+++ b/lib/ironic
@@ -0,0 +1,222 @@
+# lib/ironic
+# Functions to control the configuration and operation of the **Ironic** service
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# ``SERVICE_HOST``
+# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_ironic
+# configure_ironic
+# init_ironic
+# start_ironic
+# stop_ironic
+# cleanup_ironic
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+IRONIC_DIR=$DEST/ironic
+IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic}
+IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
+IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf
+IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
+IRONIC_ROOTWRAP_FILTERS=$IRONIC_CONF_DIR/rootwrap.d
+IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
+
+# Support entry points installation of console scripts
+IRONIC_BIN_DIR=$(get_python_exec_prefix)
+
+# Ironic connection info.  Note the port must be specified.
+IRONIC_SERVICE_PROTOCOL=http
+IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:6385}
+
+
+# Functions
+# ---------
+
+# cleanup_ironic() - Remove residual data files, anything left over from previous
+# runs that would need to be cleaned up.
+function cleanup_ironic() {
+    sudo rm -rf $IRONIC_AUTH_CACHE_DIR
+}
+
+# configure_ironic() - Set config files, create data dirs, etc
+function configure_ironic() {
+    if [[ ! -d $IRONIC_CONF_DIR ]]; then
+        sudo mkdir -p $IRONIC_CONF_DIR
+    fi
+    sudo chown $STACK_USER $IRONIC_CONF_DIR
+
+    # Copy over ironic configuration file and configure common parameters.
+    cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE
+    iniset $IRONIC_CONF_FILE DEFAULT debug True
+    inicomment $IRONIC_CONF_FILE DEFAULT log_file
+    iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic`
+    iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
+
+    # Configure Ironic conductor, if it was enabled.
+    if is_service_enabled ir-cond; then
+        configure_ironic_conductor
+    fi
+
+    # Configure Ironic API, if it was enabled.
+    if is_service_enabled ir-api; then
+        configure_ironic_api
+    fi
+}
+
+# configure_ironic_api() - Is used by configure_ironic(). Performs
+# API specific configuration.
+function configure_ironic_api() {
+    iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
+    iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
+    if is_service_enabled qpid; then
+        iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy qpid
+    elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
+        iniset $IRONIC_CONF_FILE DEFAULT notifier_strategy rabbit
+    fi
+    iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT
+    iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api
+
+    cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON
+}
+
+# configure_ironic_conductor() - Is used by configure_ironic().
+# Sets conductor specific settings.
+function configure_ironic_conductor() {
+    cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
+    cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_ROOTWRAP_FILTERS
+
+    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
+}
+
+# create_ironic_cache_dir() - Part of the init_ironic() process
+function create_ironic_cache_dir() {
+    # Create cache dir
+    sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api
+    sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api
+    rm -f $IRONIC_AUTH_CACHE_DIR/api/*
+    sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/registry
+    sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/registry
+    rm -f $IRONIC_AUTH_CACHE_DIR/registry/*
+}
+
+# create_ironic_accounts() - Set up common required ironic accounts
+
+# Tenant               User       Roles
+# ------------------------------------------------------------------
+# service              ironic     admin        # if enabled
+create_ironic_accounts() {
+
+    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    ADMIN_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+    # Ironic
+    if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
+        IRONIC_USER=$(keystone user-create \
+            --name=ironic \
+            --pass="$SERVICE_PASSWORD" \
+            --tenant_id $SERVICE_TENANT \
+            --email=ironic@example.com \
+            | grep " id " | get_field 2)
+        keystone user-role-add \
+            --tenant_id $SERVICE_TENANT \
+            --user_id $IRONIC_USER \
+            --role_id $ADMIN_ROLE
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            IRONIC_SERVICE=$(keystone service-create \
+                --name=ironic \
+                --type=baremetal \
+                --description="Ironic baremetal provisioning service" \
+                | grep " id " | get_field 2)
+            keystone endpoint-create \
+                --region RegionOne \
+                --service_id $IRONIC_SERVICE \
+                --publicurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \
+                --adminurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/" \
+                --internalurl "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1/"
+        fi
+    fi
+}
+
+
+# init_ironic() - Initialize databases, etc.
+function init_ironic() {
+    # (Re)create  ironic database
+    recreate_database ironic utf8
+
+    # Migrate ironic database
+    $IRONIC_BIN_DIR/ironic-dbsync
+
+    create_ironic_cache_dir
+
+    # Create keystone artifacts for Ironic.
+    create_ironic_accounts
+}
+
+# install_ironic() - Collect source and prepare
+function install_ironic() {
+    git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
+    setup_develop $IRONIC_DIR
+}
+
+# start_ironic() - Start running processes, including screen
+function start_ironic() {
+    # Start Ironic API server, if enabled.
+    if is_service_enabled ir-api; then
+        start_ironic_api
+    fi
+
+    # Start Ironic conductor, if enabled.
+    if is_service_enabled ir-cond; then
+        start_ironic_conductor
+    fi
+}
+
+# start_ironic_api() - Used by start_ironic().
+# Starts Ironic API server.
+function start_ironic_api() {
+    screen_it ir-api "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
+    echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= wget -q -O- http://$IRONIC_HOSTPORT; do sleep 1; done"; then
+      die $LINENO "ir-api did not start"
+    fi
+}
+
+# start_ironic_conductor() - Used by start_ironic().
+# Starts Ironic conductor.
+function start_ironic_conductor() {
+    screen_it ir-cond "cd $IRONIC_DIR; $IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
+    # TODO(romcheg): Find a way to check whether the conductor has started.
+}
+
+# stop_ironic() - Stop running processes
+function stop_ironic() {
+    # Kill the Ironic screen windows
+    screen -S $SCREEN_NAME -p ir-api -X kill
+    screen -S $SCREEN_NAME -p ir-cond -X kill
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/keystone b/lib/keystone
index e7e0544..535710f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -217,9 +217,9 @@
         --name admin \
         | grep " id " | get_field 2)
     keystone user-role-add \
-        --user_id $ADMIN_USER \
-        --role_id $ADMIN_ROLE \
-        --tenant_id $ADMIN_TENANT
+        --user-id $ADMIN_USER \
+        --role-id $ADMIN_ROLE \
+        --tenant-id $ADMIN_TENANT
 
     # service
     SERVICE_TENANT=$(keystone tenant-create \
@@ -244,10 +244,10 @@
         --pass "$ADMIN_PASSWORD" \
         --email demo@example.com \
         | grep " id " | get_field 2)
-    keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $DEMO_TENANT
-    keystone user-role-add --user_id $ADMIN_USER --role_id $ADMIN_ROLE --tenant_id $DEMO_TENANT
-    keystone user-role-add --user_id $DEMO_USER --role_id $ANOTHER_ROLE --tenant_id $DEMO_TENANT
-    keystone user-role-add --user_id $DEMO_USER --role_id $MEMBER_ROLE --tenant_id $INVIS_TENANT
+    keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $DEMO_TENANT
+    keystone user-role-add --user-id $ADMIN_USER --role-id $ADMIN_ROLE --tenant-id $DEMO_TENANT
+    keystone user-role-add --user-id $DEMO_USER --role-id $ANOTHER_ROLE --tenant-id $DEMO_TENANT
+    keystone user-role-add --user-id $DEMO_USER --role-id $MEMBER_ROLE --tenant-id $INVIS_TENANT
 
     # Keystone
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
@@ -289,6 +289,7 @@
 function install_keystoneclient() {
     git_clone $KEYSTONECLIENT_REPO $KEYSTONECLIENT_DIR $KEYSTONECLIENT_BRANCH
     setup_develop $KEYSTONECLIENT_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$KEYSTONECLIENT_DIR/tools/,/etc/bash_completion.d/}keystone.bash_completion
 }
 
 # install_keystone() - Collect source and prepare
diff --git a/lib/neutron b/lib/neutron
index 306140a..5664ff2 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -207,6 +207,10 @@
 # Hardcoding for 1 service plugin for now
 source $TOP_DIR/lib/neutron_plugins/services/vpn
 
+# Firewall Service Plugin functions
+# --------------------------------
+source $TOP_DIR/lib/neutron_plugins/services/firewall
+
 # Use security group or not
 if has_neutron_plugin_security_group; then
     Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
@@ -230,6 +234,9 @@
     if is_service_enabled q-vpn; then
         _configure_neutron_vpn
     fi
+    if is_service_enabled q-fwaas; then
+        _configure_neutron_fwaas
+    fi
     if is_service_enabled q-svc; then
         _configure_neutron_service
     fi
@@ -250,18 +257,18 @@
 }
 
 function create_nova_conf_neutron() {
-    iniset $NOVA_CONF DEFAULT network_api_class "nova.network.quantumv2.api.API"
-    iniset $NOVA_CONF DEFAULT quantum_admin_username "$Q_ADMIN_USERNAME"
-    iniset $NOVA_CONF DEFAULT quantum_admin_password "$SERVICE_PASSWORD"
-    iniset $NOVA_CONF DEFAULT quantum_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
-    iniset $NOVA_CONF DEFAULT quantum_auth_strategy "$Q_AUTH_STRATEGY"
-    iniset $NOVA_CONF DEFAULT quantum_admin_tenant_name "$SERVICE_TENANT_NAME"
-    iniset $NOVA_CONF DEFAULT quantum_region_name "RegionOne"
-    iniset $NOVA_CONF DEFAULT quantum_url "http://$Q_HOST:$Q_PORT"
+    iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API"
+    iniset $NOVA_CONF DEFAULT neutron_admin_username "$Q_ADMIN_USERNAME"
+    iniset $NOVA_CONF DEFAULT neutron_admin_password "$SERVICE_PASSWORD"
+    iniset $NOVA_CONF DEFAULT neutron_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+    iniset $NOVA_CONF DEFAULT neutron_auth_strategy "$Q_AUTH_STRATEGY"
+    iniset $NOVA_CONF DEFAULT neutron_admin_tenant_name "$SERVICE_TENANT_NAME"
+    iniset $NOVA_CONF DEFAULT neutron_region_name "RegionOne"
+    iniset $NOVA_CONF DEFAULT neutron_url "http://$Q_HOST:$Q_PORT"
 
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-        iniset $NOVA_CONF DEFAULT security_group_api quantum
+        iniset $NOVA_CONF DEFAULT security_group_api neutron
     fi
 
     # set NOVA_VIF_DRIVER and optionally set options in nova_conf
@@ -270,7 +277,7 @@
     iniset $NOVA_CONF DEFAULT libvirt_vif_driver "$NOVA_VIF_DRIVER"
     iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER"
     if is_service_enabled q-meta; then
-        iniset $NOVA_CONF DEFAULT service_quantum_metadata_proxy "True"
+        iniset $NOVA_CONF DEFAULT service_neutron_metadata_proxy "True"
     fi
 }
 
@@ -294,9 +301,9 @@
             --email=neutron@example.com \
             | grep " id " | get_field 2)
         keystone user-role-add \
-            --tenant_id $SERVICE_TENANT \
-            --user_id $NEUTRON_USER \
-            --role_id $ADMIN_ROLE
+            --tenant-id $SERVICE_TENANT \
+            --user-id $NEUTRON_USER \
+            --role-id $ADMIN_ROLE
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
             NEUTRON_SERVICE=$(keystone service-create \
                 --name=neutron \
@@ -320,6 +327,9 @@
     # Since neutron command is executed in admin context at this point,
     # ``--tenant_id`` needs to be specified.
     if is_baremetal; then
+        if [[ "$PUBLIC_INTERFACE" == '' || "$OVS_PHYSICAL_BRIDGE" == '' ]]; then
+            die $LINENO "Neutron settings for baremetal not set.. exiting"
+        fi
         sudo ovs-vsctl add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
         for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
             sudo ip addr del $IP dev $PUBLIC_INTERFACE
@@ -328,6 +338,7 @@
         NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type flat --provider:physical_network "$PHYSICAL_NETWORK" | grep ' id ' | get_field 2)
         SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
         sudo ifconfig $OVS_PHYSICAL_BRIDGE up
+        sudo route add default gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
     else
         NET_ID=$(neutron net-create --tenant_id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
         SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY --name $PRIVATE_SUBNET_NAME $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
@@ -382,6 +393,7 @@
 function install_neutronclient() {
     git_clone $NEUTRONCLIENT_REPO $NEUTRONCLIENT_DIR $NEUTRONCLIENT_BRANCH
     setup_develop $NEUTRONCLIENT_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$NEUTRONCLIENT_DIR/tools/,/etc/bash_completion.d/}neutron.bash_completion
 }
 
 # install_neutron_agent_packages() - Collect source and prepare
@@ -418,11 +430,17 @@
     screen_it q-agt "cd $NEUTRON_DIR && python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
     screen_it q-dhcp "cd $NEUTRON_DIR && python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE"
 
-    if is_service_enabled q-vpn; then
-        screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
-    else
-        screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY --config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+    L3_CONF_FILES="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE"
+
+    if is_service_enabled q-fwaas; then
+        L3_CONF_FILES="$L3_CONF_FILES --config-file $Q_FWAAS_CONF_FILE"
     fi
+    if is_service_enabled q-vpn; then
+        screen_it q-vpn "cd $NEUTRON_DIR && $AGENT_VPN_BINARY $L3_CONF_FILES"
+    else
+        screen_it q-l3 "cd $NEUTRON_DIR && python $AGENT_L3_BINARY $L3_CONF_FILES"
+    fi
+
     screen_it q-meta "cd $NEUTRON_DIR && python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE"
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
@@ -479,7 +497,7 @@
     #    ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)``
     neutron_plugin_configure_common
 
-    if [[ $Q_PLUGIN_CONF_PATH == '' || $Q_PLUGIN_CONF_FILENAME == '' || $Q_PLUGIN_CLASS == '' ]]; then
+    if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
         die $LINENO "Neutron plugin not set.. exiting"
     fi
 
@@ -507,6 +525,15 @@
         done
     fi
 
+    if [ "$VIRT_DRIVER" = 'fake' ]; then
+        # Disable arbitrary limits
+        iniset $NEUTRON_CONF quotas quota_network -1
+        iniset $NEUTRON_CONF quotas quota_subnet -1
+        iniset $NEUTRON_CONF quotas quota_port -1
+        iniset $NEUTRON_CONF quotas quota_security_group -1
+        iniset $NEUTRON_CONF quotas quota_security_group_rule -1
+    fi
+
     _neutron_setup_rootwrap
 }
 
@@ -537,10 +564,18 @@
     cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
 
     iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
-    iniset $Q_DHCP_CONF_FILE DEFAULT debug True
+    iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
 
+    # Define extra "DEFAULT" configuration options when q-dhcp is configured by
+    # defining the array ``Q_DHCP_EXTRA_DEFAULT_OPTS``.
+    # For Example: ``Q_DHCP_EXTRA_DEFAULT_OPTS=(foo=true bar=2)``
+    for I in "${Q_DHCP_EXTRA_DEFAULT_OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset $Q_DHCP_CONF_FILE DEFAULT ${I/=/ }
+    done
+
     _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
 
     neutron_plugin_configure_dhcp_agent
@@ -554,10 +589,14 @@
     AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
     Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
 
+    if is_service_enabled q-fwaas; then
+        Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
+    fi
+
     cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
 
     iniset $Q_L3_CONF_FILE DEFAULT verbose True
-    iniset $Q_L3_CONF_FILE DEFAULT debug True
+    iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
 
@@ -573,10 +612,12 @@
     cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
 
     iniset $Q_META_CONF_FILE DEFAULT verbose True
-    iniset $Q_META_CONF_FILE DEFAULT debug True
+    iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP
     iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND"
 
+    _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True True True
+
 }
 
 function _configure_neutron_lbaas() {
@@ -584,6 +625,11 @@
     neutron_agent_lbaas_configure_agent
 }
 
+function _configure_neutron_fwaas() {
+    neutron_fwaas_configure_common
+    neutron_fwaas_configure_driver
+}
+
 function _configure_neutron_vpn()
 {
     neutron_vpn_install_agent_packages
@@ -597,7 +643,7 @@
     # ensure that an agent's configuration can override the default
     iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
     iniset $NEUTRON_CONF DEFAULT verbose True
-    iniset $NEUTRON_CONF DEFAULT debug True
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
     # Configure agent for plugin
     neutron_plugin_configure_plugin_agent
@@ -620,7 +666,7 @@
     fi
 
     iniset $NEUTRON_CONF DEFAULT verbose True
-    iniset $NEUTRON_CONF DEFAULT debug True
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE
     iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
 
@@ -687,21 +733,29 @@
     local conf_file=$1
     local section=$2
     local use_auth_url=$3
+    local skip_auth_cache=$4
+    local use_service_port=$5
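+    # Optional args: pass a non-empty value as arg 4 to skip setting up the
+    # token signing cache dir, and as arg 5 to build the auth settings
+    # against the public service port instead of the admin port.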
+    local keystone_port=$KEYSTONE_AUTH_PORT
+    if [[ -n $use_service_port ]]; then
+        keystone_port=$KEYSTONE_SERVICE_PORT
+    fi
     if [[ -n $use_auth_url ]]; then
-        iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0"
+        iniset $conf_file $section auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_AUTH_HOST:$keystone_port/v2.0"
     else
         iniset $conf_file $section auth_host $KEYSTONE_SERVICE_HOST
-        iniset $conf_file $section auth_port $KEYSTONE_AUTH_PORT
+        iniset $conf_file $section auth_port $keystone_port
         iniset $conf_file $section auth_protocol $KEYSTONE_SERVICE_PROTOCOL
     fi
     iniset $conf_file $section admin_tenant_name $SERVICE_TENANT_NAME
     iniset $conf_file $section admin_user $Q_ADMIN_USERNAME
     iniset $conf_file $section admin_password $SERVICE_PASSWORD
-    iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR
-    # Create cache dir
-    sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR
-    sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR
-    rm -f $NEUTRON_AUTH_CACHE_DIR/*
+    if [[ -z $skip_auth_cache ]]; then
+        iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR
+        # Create cache dir
+        sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR
+        sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR
+        rm -f $NEUTRON_AUTH_CACHE_DIR/*
+    fi
 }
 
 function _neutron_setup_interface_driver() {
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index 4d343f5..0ad760b 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -31,7 +31,12 @@
 }
 
 function neutron_plugin_configure_dhcp_agent() {
-   die $LINENO "q-dhcp must not be executed with MidoNet plugin!"
+    DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"}
+    DHCP_INTERFACE_DRIVER=${DHCP_INTERFACE_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.MidonetInterfaceDriver"}
+    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER
+    iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER
+    iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True
+    iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
 }
 
 function neutron_plugin_configure_l3_agent() {
@@ -58,9 +63,6 @@
     if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
     fi
-    if [[ "$MIDONET_METADATA_ROUTER_ID" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $MIDONET_METADATA_ROUTER_ID
-    fi
 }
 
 function neutron_plugin_setup_interface_driver() {
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 00bd716..71a0638 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -10,9 +10,9 @@
 Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-}
 # This has to be set here since the agent will set this in the config file
 if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
-    Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE)
+    Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE)
 elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
-    Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=gre)
+    Q_AGENT_EXTRA_AGENT_OPTS+=(tunnel_types=gre)
 fi
 
 # Default openvswitch L2 agent
@@ -20,7 +20,7 @@
 source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
 
 # List of MechanismDrivers to load
-Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-}
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge}
 # List of Type Drivers to load
 Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan}
 # Default GRE TypeDriver options
@@ -46,18 +46,27 @@
     Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
     Q_DB_NAME="neutron_ml2"
     Q_PLUGIN_CLASS="neutron.plugins.ml2.plugin.Ml2Plugin"
+    # The ML2 plugin delegates L3 routing/NAT functionality to
+    # the L3 service plugin which must therefore be specified.
+    Q_L3_PLUGIN_CLASS=${Q_L3_PLUGIN_CLASS:-"neutron.services.l3_router.l3_router_plugin.L3RouterPlugin"}
+    if ini_has_option $NEUTRON_CONF DEFAULT service_plugins; then
+        srv_plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)","$Q_L3_PLUGIN_CLASS
+    else
+        srv_plugins=$Q_L3_PLUGIN_CLASS
+    fi
+    iniset $NEUTRON_CONF DEFAULT service_plugins $srv_plugins
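+    # e.g. with no other service plugins configured this results in
+    # service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin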
 }
 
 function neutron_plugin_configure_service() {
     if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
-        Q_SRV_EXTRA_OPTS=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
+        Q_SRV_EXTRA_OPTS+=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
     elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
         # This assumes you want a simple configuration, and will overwrite
         # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS.
-        Q_SRV_EXTRA_OPTS=(tenant_network_types=gre)
+        Q_SRV_EXTRA_OPTS+=(tenant_network_types=gre)
         Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES)
     elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
-        Q_SRV_EXTRA_OPTS=(tenant_network_types=vlan)
+        Q_SRV_EXTRA_OPTS+=(tenant_network_types=vlan)
     else
         echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts."
     fi
diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira
index eabc417..e9deb64 100644
--- a/lib/neutron_plugins/nicira
+++ b/lib/neutron_plugins/nicira
@@ -90,7 +90,7 @@
         iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l3_gw_service_uuid $DEFAULT_L3_GW_SVC_UUID
         Q_L3_ENABLED=True
         Q_L3_ROUTER_PER_TENANT=True
-        iniset /$Q_PLUGIN_CONF_FILE nvp enable_metadata_access_network True
+        iniset /$Q_PLUGIN_CONF_FILE nvp metadata_mode access_network
     fi
     if [[ "$DEFAULT_L2_GW_SVC_UUID" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE DEFAULT default_l2_gw_service_uuid $DEFAULT_L2_GW_SVC_UUID
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
new file mode 100644
index 0000000..1597e85
--- /dev/null
+++ b/lib/neutron_plugins/services/firewall
@@ -0,0 +1,27 @@
+# Neutron firewall plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
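+# Enable FWaaS by adding ``q-fwaas`` to ``ENABLED_SERVICES``
+# (e.g. ``enable_service q-fwaas`` in ``localrc``).
+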
+FWAAS_PLUGIN=neutron.services.firewall.fwaas_plugin.FirewallPlugin
+
+function neutron_fwaas_configure_common() {
+    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+        Q_SERVICE_PLUGIN_CLASSES=$FWAAS_PLUGIN
+    else
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$FWAAS_PLUGIN"
+    fi
+}
+
+function neutron_fwaas_configure_driver() {
+    FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini
+    cp $NEUTRON_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME
+
+    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True
+    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver"
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 0a79a69..b8f5c7d 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -8,9 +8,10 @@
 
 AGENT_VPN_BINARY="$NEUTRON_BIN_DIR/neutron-vpn-agent"
 VPN_PLUGIN="neutron.services.vpn.plugin.VPNDriverPlugin"
+IPSEC_PACKAGE=${IPSEC_PACKAGE:-"openswan"}
 
 function neutron_vpn_install_agent_packages() {
-    install_package strongswan
+    install_package $IPSEC_PACKAGE
 }
 
 function neutron_vpn_configure_common() {
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
index b3c726f..7928bca 100644
--- a/lib/neutron_thirdparty/midonet
+++ b/lib/neutron_thirdparty/midonet
@@ -10,22 +10,20 @@
 
 # MidoNet devstack destination dir
 MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
+MIDONET_API_PORT=${MIDONET_API_PORT:-8080}
+MIDONET_API_URL=${MIDONET_API_URL:-http://localhost:$MIDONET_API_PORT/midonet-api}
 
 # MidoNet client repo
 MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
 MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
-MIDONET_CLIENT_DIR=$MIDONET_DIR/python-midonetclient
+MIDONET_CLIENT_DIR=${MIDONET_CLIENT_DIR:-$MIDONET_DIR/python-midonetclient}
 
 # MidoNet OpenStack repo
 MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git}
 MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master}
-MIDONET_OS_DIR=$MIDONET_DIR/midonet-openstack
+MIDONET_OS_DIR=${MIDONET_OS_DIR:-$MIDONET_DIR/midonet-openstack}
 MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py}
 
-
-MIDOLMAN_LOG=${MIDOLMAN_LOG:-/var/log/midolman/midolman.log}
-MIDONET_API_LOG=${MIDONET_API_LOG:-/var/log/tomcat7/midonet-api.log}
-
 # Save trace setting
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
@@ -37,13 +35,11 @@
 function init_midonet() {
 
     # Initialize DB.  Evaluate the output of setup_midonet_topology.py to set
-    # env variables for provider router ID and metadata router ID
-    eval `python $MIDONET_SETUP_SCRIPT admin $ADMIN_PASSWORD $ADMIN_TENANT provider_devices`
+    # env variables for provider router ID.
+    eval `python $MIDONET_SETUP_SCRIPT $MIDONET_API_URL admin $ADMIN_PASSWORD admin provider_devices`
     die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set."
-    die_if_not_set $LINENO metadata_router_id "Error running midonet setup script, metadata_router_id was not set."
 
     iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id
-    iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $metadata_router_id
 }
 
 function install_midonet() {
diff --git a/lib/nova b/lib/nova
index 9c38498..9b766a9 100644
--- a/lib/nova
+++ b/lib/nova
@@ -169,6 +169,13 @@
     fi
 
     sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
+
+    # NOTE(dtroyer): This really should be called from here but due to the way
+    #                nova abuses the _cleanup() function we're moving it
+    #                directly into cleanup.sh until this can be fixed.
+    #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    #    cleanup_nova_hypervisor
+    #fi
 }
 
 # configure_nova_rootwrap() - configure Nova's rootwrap
@@ -392,9 +399,9 @@
             --email=nova@example.com \
             | grep " id " | get_field 2)
         keystone user-role-add \
-            --tenant_id $SERVICE_TENANT \
-            --user_id $NOVA_USER \
-            --role_id $ADMIN_ROLE
+            --tenant-id $SERVICE_TENANT \
+            --user-id $NOVA_USER \
+            --role-id $ADMIN_ROLE
         if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
             NOVA_SERVICE=$(keystone service-create \
                 --name=nova \
@@ -430,7 +437,7 @@
     # (Re)create ``nova.conf``
     rm -f $NOVA_CONF
     iniset $NOVA_CONF DEFAULT verbose "True"
-    iniset $NOVA_CONF DEFAULT debug "True"
+    iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
     iniset $NOVA_CONF DEFAULT auth_strategy "keystone"
     iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
     iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
@@ -444,6 +451,9 @@
     iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
     iniset $NOVA_CONF DEFAULT osapi_compute_extension "nova.api.openstack.compute.contrib.standard_extensions"
     iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
+    iniset $NOVA_CONF DEFAULT osapi_compute_workers "4"
+    iniset $NOVA_CONF DEFAULT ec2_workers "4"
+    iniset $NOVA_CONF DEFAULT metadata_workers "4"
     iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova`
     if is_baremetal; then
         iniset $NOVA_CONF baremetal sql_connection `database_connection_url nova_bm`
@@ -451,6 +461,7 @@
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         iniset $NOVA_CONF DEFAULT libvirt_type "$LIBVIRT_TYPE"
         iniset $NOVA_CONF DEFAULT libvirt_cpu_mode "none"
+        iniset $NOVA_CONF DEFAULT use_usb_tablet "False"
     fi
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
     iniset $NOVA_CONF osapi_v3 enabled "True"
@@ -645,12 +656,15 @@
 function install_novaclient() {
     git_clone $NOVACLIENT_REPO $NOVACLIENT_DIR $NOVACLIENT_BRANCH
     setup_develop $NOVACLIENT_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$NOVACLIENT_DIR/tools/,/etc/bash_completion.d/}nova.bash_completion
 }
 
 # install_nova() - Collect source and prepare
 function install_nova() {
     if is_service_enabled n-cpu; then
-        if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+        if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+            install_nova_hypervisor
+        elif [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
             if is_ubuntu; then
                 install_package kvm
                 install_package libvirt-bin
@@ -682,6 +696,7 @@
 
     git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
     setup_develop $NOVA_DIR
+    sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
 }
 
 # start_nova_api() - Start the API process ahead of other things
@@ -728,6 +743,9 @@
            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
        done
     else
+        if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+            start_nova_hypervisor
+        fi
         screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
     fi
     screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
@@ -754,6 +772,9 @@
     for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
+    if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+        stop_nova_hypervisor
+    fi
 }
 
 
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
new file mode 100644
index 0000000..4c8fc27
--- /dev/null
+++ b/lib/nova_plugins/hypervisor-docker
@@ -0,0 +1,132 @@
+# lib/nova_plugins/hypervisor-docker
+# Configure the Docker hypervisor
+
+# Enable with:
+# VIRT_DRIVER=docker
+
+# Dependencies:
+# ``functions`` file
+# ``nova`` and ``glance`` configurations
+
+# install_nova_hypervisor - install any external requirements
+# configure_nova_hypervisor - make configuration changes, including those to other services
+# start_nova_hypervisor - start any external services
+# stop_nova_hypervisor - stop any external services
+# cleanup_nova_hypervisor - remove transient data and cache
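+#
+# ``stack.sh`` sources this file automatically when ``VIRT_DRIVER=docker``
+# and the ``nova`` service is enabled.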
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default directories
+DOCKER_DIR=$DEST/docker
+DOCKER_REPO=${DOCKER_REPO:-https://github.com/dotcloud/openstack-docker.git}
+DOCKER_BRANCH=${DOCKER_BRANCH:-master}
+
+DOCKER_UNIX_SOCKET=/var/run/docker.sock
+DOCKER_PID_FILE=/var/run/docker.pid
+DOCKER_REGISTRY_PORT=${DOCKER_REGISTRY_PORT:-5042}
+
+DOCKER_IMAGE=${DOCKER_IMAGE:-http://get.docker.io/images/openstack/docker-ut.tar.gz}
+DOCKER_IMAGE_NAME=docker-busybox
+DOCKER_REGISTRY_IMAGE=${DOCKER_REGISTRY_IMAGE:-http://get.docker.io/images/openstack/docker-registry.tar.gz}
+DOCKER_REGISTRY_IMAGE_NAME=docker-registry
+DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
+
+DOCKER_PACKAGE_VERSION=${DOCKER_PACKAGE_VERSION:-0.6.1}
+DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu}
+
+
+# Entry Points
+# ------------
+
+# cleanup_nova_hypervisor - Clean up an installation
+function cleanup_nova_hypervisor() {
+    stop_service docker
+
+    # Clean out work area
+    sudo rm -rf /var/lib/docker
+}
+
+# configure_nova_hypervisor - Set config files, create data dirs, etc
+function configure_nova_hypervisor() {
+    git_clone $DOCKER_REPO $DOCKER_DIR $DOCKER_BRANCH
+
+    ln -snf ${DOCKER_DIR}/nova-driver $NOVA_DIR/nova/virt/docker
+
+    iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver
+    iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
+
+    sudo cp -p ${DOCKER_DIR}/nova-driver/docker.filters $NOVA_CONF_DIR/rootwrap.d
+}
+
+# install_nova_hypervisor() - Install external components
+function install_nova_hypervisor() {
+    # So far this is Ubuntu only
+    if ! is_ubuntu; then
+        die $LINENO "Docker is only supported on Ubuntu at this time"
+    fi
+
+    # Make sure Docker is installed
+    if ! is_package_installed lxc-docker; then
+        die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
+    fi
+
+    local docker_pid
+    read docker_pid <$DOCKER_PID_FILE
+    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
+        die $LINENO "Docker not running"
+    fi
+}
+
+# start_nova_hypervisor - Start any required external services
+function start_nova_hypervisor() {
+    local docker_pid
+    read docker_pid <$DOCKER_PID_FILE
+    if [[ -z $docker_pid ]] || ! ps -p $docker_pid | grep [d]ocker; then
+        die $LINENO "Docker not running, start the daemon"
+    fi
+
+    # Start the Docker registry container
+    docker run -d -p ${DOCKER_REGISTRY_PORT}:5000 \
+        -e SETTINGS_FLAVOR=openstack -e OS_USERNAME=${OS_USERNAME} \
+        -e OS_PASSWORD=${OS_PASSWORD} -e OS_TENANT_NAME=${OS_TENANT_NAME} \
+        -e OS_GLANCE_URL="${SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" \
+        -e OS_AUTH_URL=${OS_AUTH_URL} \
+        $DOCKER_REGISTRY_IMAGE_NAME ./docker-registry/run.sh
+
+    echo "Waiting for docker registry to start..."
+    DOCKER_REGISTRY=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl -s $DOCKER_REGISTRY; do sleep 1; done"; then
+        die $LINENO "docker-registry did not start"
+    fi
+
+    # Tag image if not already tagged
+    if ! docker images | grep $DOCKER_REPOSITORY_NAME; then
+        docker tag $DOCKER_IMAGE_NAME $DOCKER_REPOSITORY_NAME
+    fi
+
+    # Make sure we copied the image in Glance
+    DOCKER_IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME ")
+    if ! is_set DOCKER_IMAGE ; then
+        docker push $DOCKER_REPOSITORY_NAME
+    fi
+}
+
+# stop_nova_hypervisor - Stop any external services
+function stop_nova_hypervisor() {
+    # Stop the docker registry container
+    docker kill $(docker ps | grep docker-registry | cut -d' ' -f1)
+}
+
+
+# Restore xtrace
+$MY_XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/oslo b/lib/oslo
index de5ec4e..f77a4fa 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -40,12 +40,12 @@
 
 # cleanup_oslo() - purge possibly old versions of oslo
 function cleanup_oslo() {
-    # this means we've got an old olso installed, lets get rid of it
-    if find /usr | grep oslo.config | grep -v oslo.config.egg-link > /dev/null; then
+    # this means we've got an old or broken oslo.config installed, let's get rid of it
+    if ! python -c 'import oslo.config' 2>/dev/null; then
         echo "Found old oslo.config... removing to ensure consistency"
         local PIP_CMD=$(get_pip_command)
-        pip_install olso.config
-        sudo $PIP_CMD uninstall -y olso.config
+        pip_install oslo.config
+        sudo $PIP_CMD uninstall -y oslo.config
     fi
 }
 
diff --git a/lib/swift b/lib/swift
index 8e64152..f72beaf 100644
--- a/lib/swift
+++ b/lib/swift
@@ -464,7 +464,7 @@
 
     SWIFT_USER=$(keystone user-create --name=swift --pass="$SERVICE_PASSWORD" \
         --tenant_id $SERVICE_TENANT --email=swift@example.com | grep " id " | get_field 2)
-    keystone user-role-add --tenant_id $SERVICE_TENANT --user_id $SWIFT_USER --role_id $ADMIN_ROLE
+    keystone user-role-add --tenant-id $SERVICE_TENANT --user-id $SWIFT_USER --role-id $ADMIN_ROLE
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
         SWIFT_SERVICE=$(keystone service-create --name=swift --type="object-store" \
@@ -479,14 +479,14 @@
 
     SWIFT_TENANT_TEST1=$(keystone tenant-create --name=swifttenanttest1 | grep " id " | get_field 2)
     SWIFT_USER_TEST1=$(keystone user-create --name=swiftusertest1 --pass=testing --email=test@example.com | grep " id " | get_field 2)
-    keystone user-role-add --user_id $SWIFT_USER_TEST1 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST1
+    keystone user-role-add --user-id $SWIFT_USER_TEST1 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST1
 
     SWIFT_USER_TEST3=$(keystone user-create --name=swiftusertest3 --pass=testing3 --email=test3@example.com | grep " id " | get_field 2)
-    keystone user-role-add --user_id $SWIFT_USER_TEST3 --role_id $ANOTHER_ROLE --tenant_id $SWIFT_TENANT_TEST1
+    keystone user-role-add --user-id $SWIFT_USER_TEST3 --role-id $ANOTHER_ROLE --tenant-id $SWIFT_TENANT_TEST1
 
     SWIFT_TENANT_TEST2=$(keystone tenant-create --name=swifttenanttest2 | grep " id " | get_field 2)
     SWIFT_USER_TEST2=$(keystone user-create --name=swiftusertest2 --pass=testing2 --email=test2@example.com | grep " id " | get_field 2)
-    keystone user-role-add --user_id $SWIFT_USER_TEST2 --role_id $ADMIN_ROLE --tenant_id $SWIFT_TENANT_TEST2
+    keystone user-role-add --user-id $SWIFT_USER_TEST2 --role-id $ADMIN_ROLE --tenant-id $SWIFT_TENANT_TEST2
 }
 
 # init_swift() - Initialize rings
diff --git a/lib/tempest b/lib/tempest
index aaa7281..e48ccf2 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -24,6 +24,7 @@
 # ``DEFAULT_INSTANCE_TYPE``
 # ``DEFAULT_INSTANCE_USER``
 # ``CINDER_MULTI_LVM_BACKEND``
+# ``HEAT_CREATE_TEST_IMAGE``
 # ``stack.sh`` calls the entry points in this order:
 #
 # install_tempest
@@ -201,6 +202,7 @@
     mkdir -p $TEMPEST_STATE_PATH
     iniset $TEMPEST_CONF DEFAULT use_stderr False
     iniset $TEMPEST_CONF DEFAULT log_file tempest.log
+    iniset $TEMPEST_CONF DEFAULT debug True
 
     # Timeouts
     iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT
@@ -271,10 +273,22 @@
     iniset $TEMPEST_CONF boto http_socket_timeout 30
     iniset $TEMPEST_CONF boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
 
+    # Orchestration test image
+    if [ "$HEAT_CREATE_TEST_IMAGE" == "True" ]; then
+        disk_image_create /usr/share/tripleo-image-elements "vm fedora heat-cfntools" "i386" "fedora-vm-heat-cfntools-tempest"
+        iniset $TEMPEST_CONF orchestration image_ref "fedora-vm-heat-cfntools-tempest"
+    fi
+
     # Scenario
     iniset $TEMPEST_CONF scenario img_dir "$FILES/images/cirros-0.3.1-x86_64-uec"
 
+    # Large Ops Number
+    iniset $TEMPEST_CONF scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
+
     # Volume
+    if is_service_enabled c-bak; then
+        iniset $TEMPEST_CONF volume volume_backup_enabled "True"
+    fi
     CINDER_MULTI_LVM_BACKEND=$(trueorfalse False $CINDER_MULTI_LVM_BACKEND)
     if [ $CINDER_MULTI_LVM_BACKEND == "True" ]; then
         iniset $TEMPEST_CONF volume multi_backend_enabled "True"
diff --git a/lib/trove b/lib/trove
new file mode 100644
index 0000000..e64ca5f
--- /dev/null
+++ b/lib/trove
@@ -0,0 +1,170 @@
+# lib/trove
+# Functions to control the configuration and operation of the **Trove** service
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``STACK_USER`` must be defined
+# ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_trove
+# configure_trove
+# init_trove
+# start_trove
+# stop_trove
+# cleanup_trove
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
+
+# Set up default configuration
+TROVE_DIR=$DEST/trove
+TROVECLIENT_DIR=$DEST/python-troveclient
+TROVE_CONF_DIR=/etc/trove
+TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
+TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION
+TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
+TROVE_BIN_DIR=/usr/local/bin
+
+# create_trove_accounts() - Set up common required trove accounts
+
+# Tenant               User       Roles
+# ------------------------------------------------------------------
+# service              trove     admin        # if enabled
+
+create_trove_accounts() {
+    # Trove
+    SERVICE_TENANT=$(keystone tenant-list | awk "/ $SERVICE_TENANT_NAME / { print \$2 }")
+    SERVICE_ROLE=$(keystone role-list | awk "/ admin / { print \$2 }")
+
+    if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then
+        TROVE_USER=$(keystone user-create --name=trove \
+                                                  --pass="$SERVICE_PASSWORD" \
+                                                  --tenant_id $SERVICE_TENANT \
+                                                  --email=trove@example.com \
+                                                  | grep " id " | get_field 2)
+        keystone user-role-add --tenant-id $SERVICE_TENANT \
+                               --user-id $TROVE_USER \
+                               --role-id $SERVICE_ROLE
+        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
+            TROVE_SERVICE=$(keystone service-create \
+                --name=trove \
+                --type=database \
+                --description="Trove Service" \
+                | grep " id " | get_field 2)
+            keystone endpoint-create \
+                --region RegionOne \
+                --service_id $TROVE_SERVICE \
+                --publicurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
+                --adminurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \
+                --internalurl "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s"
+        fi
+    fi
+}
+
+# stack.sh entry points
+# ---------------------
+
+# cleanup_trove() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_trove() {
+    # Clean up dirs
+    rm -fr $TROVE_AUTH_CACHE_DIR/*
+    rm -fr $TROVE_CONF_DIR/*
+}
+
+# configure_troveclient() - Set config files, create data dirs, etc
+function configure_troveclient() {
+    setup_develop $TROVECLIENT_DIR
+}
+
+# configure_trove() - Set config files, create data dirs, etc
+function configure_trove() {
+    setup_develop $TROVE_DIR
+
+    # Create the trove conf dir and cache dirs if they don't exist
+    sudo mkdir -p ${TROVE_CONF_DIR}
+    sudo mkdir -p ${TROVE_AUTH_CACHE_DIR}
+    sudo chown -R $STACK_USER: ${TROVE_CONF_DIR}
+    sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR}
+
+    # Copy api-paste file over to the trove conf dir and configure it
+    cp $TROVE_LOCAL_CONF_DIR/api-paste.ini $TROVE_CONF_DIR/api-paste.ini
+    TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini
+    iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST
+    iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT
+    iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove
+    iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD
+    iniset $TROVE_API_PASTE_INI filter:tokenauth signing_dir $TROVE_AUTH_CACHE_DIR
+
+    # (Re)create trove conf files
+    rm -f $TROVE_CONF_DIR/trove.conf
+    rm -f $TROVE_CONF_DIR/trove-taskmanager.conf
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT rabbit_password $RABBIT_PASSWORD
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT sql_connection `database_connection_url trove`
+    iniset $TROVE_CONF_DIR/trove.conf DEFAULT add_addresses True
+
+    iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT rabbit_password $RABBIT_PASSWORD
+    iniset $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample DEFAULT sql_connection `database_connection_url trove`
+    sed -i "s/localhost/$NETWORK_GATEWAY/g" $TROVE_LOCAL_CONF_DIR/trove-guestagent.conf.sample
+
+    # (Re)create trove taskmanager conf file if needed
+    if is_service_enabled tr-tmgr; then
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove`
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_user radmin
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_tenant_name trove
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS
+        iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT
+    fi
+}
+
+# install_troveclient() - Collect source and prepare
+function install_troveclient() {
+    git_clone $TROVECLIENT_REPO $TROVECLIENT_DIR $TROVECLIENT_BRANCH
+}
+
+# install_trove() - Collect source and prepare
+function install_trove() {
+    git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH
+}
+
+# init_trove() - Initializes Trove Database as a Service
+function init_trove() {
+    # (Re)create trove db
+    recreate_database trove utf8
+
+    # Initialize the trove database
+    $TROVE_DIR/bin/trove-manage db_sync
+}
+
+# start_trove() - Start running processes, including screen
+function start_trove() {
+    screen_it tr-api "cd $TROVE_DIR; bin/trove-api --config-file=$TROVE_CONF_DIR/trove.conf --debug 2>&1"
+    screen_it tr-tmgr "cd $TROVE_DIR; bin/trove-taskmanager --config-file=$TROVE_CONF_DIR/trove-taskmanager.conf --debug 2>&1"
+}
+
+# stop_trove() - Stop running processes
+function stop_trove() {
+    # Kill the trove screen windows
+    for serv in tr-api tr-tmgr; do
+        screen -S $SCREEN_NAME -p $serv -X kill
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/openrc b/openrc
index a23c6e9..3de7e39 100644
--- a/openrc
+++ b/openrc
@@ -63,21 +63,19 @@
 # should be listening on HOST_IP.  If its running elsewhere, it can be set here
 GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
 
+# Identity API version
+export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
+
 # Authenticating against an Openstack cloud using Keystone returns a **Token**
 # and **Service Catalog**.  The catalog contains the endpoints for all services
 # the user/tenant has access to - including nova, glance, keystone, swift, ...
 # We currently recommend using the 2.0 *identity api*.
 #
-# *NOTE*: Using the 2.0 *identity api* does not mean that compute api is 2.0.  We
-# will use the 1.1 *compute api*
-export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0
+export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
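+# e.g. with the default Identity API version of 2.0 this expands to
+# $SERVICE_PROTOCOL://$SERVICE_HOST:5000/v2.0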
 
 # Set the pointer to our CA certificate chain.  Harmless if TLS is not used.
 export OS_CACERT=$INT_CA_DIR/ca-chain.pem
 
-# Identity API version
-export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
-
 # Currently novaclient needs you to specify the *compute api* version.  This
 # needs to match the config of your catalog returned by Keystone.
 export NOVA_VERSION=${NOVA_VERSION:-1.1}
diff --git a/stack.sh b/stack.sh
index 22a23c8..be04bed 100755
--- a/stack.sh
+++ b/stack.sh
@@ -2,8 +2,8 @@
 
 # ``stack.sh`` is an opinionated OpenStack developer installation.  It
 # installs and configures various combinations of **Ceilometer**, **Cinder**,
-# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**
-# and **Swift**.
+# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**,
+# **Swift**, and **Trove**.
 
 # This script allows you to specify configuration options of what git
 # repositories to use, enabled services, network configuration and various
@@ -234,8 +234,10 @@
 fi
 
 # Create the destination directory and ensure it is writable by the user
+# and read/executable by everybody for daemons (e.g. Apache serving Horizon)
 sudo mkdir -p $DEST
 sudo chown -R $STACK_USER $DEST
+chmod 0755 $DEST
 
 # a basic test for $DEST path permissions (fatal on error unless skipped)
 check_path_perm_sanity ${DEST}
@@ -250,6 +252,9 @@
 # operation.
 ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
 
+# Whether to enable the debug log level in OpenStack services
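+# (set ``ENABLE_DEBUG_LOG_LEVEL=False`` in ``localrc`` to turn debug logging off)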
+ENABLE_DEBUG_LOG_LEVEL=`trueorfalse True $ENABLE_DEBUG_LOG_LEVEL`
+
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 sudo mkdir -p $DATA_DIR
@@ -313,6 +318,15 @@
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/ldap
+source $TOP_DIR/lib/ironic
+source $TOP_DIR/lib/trove
+
+# Look for Nova hypervisor plugin
+NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins
+if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    # Load plugin
+    source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
+fi
 
 # Set the destination directories for other OpenStack projects
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
@@ -581,62 +595,8 @@
 # Configure an appropriate python environment
 $TOP_DIR/tools/install_pip.sh
 
-# System-specific preconfigure
-# ============================
-
-if [[ is_fedora && $DISTRO =~ (rhel6) ]]; then
-    # Disable selinux to avoid configuring to allow Apache access
-    # to Horizon files or run nodejs (LP#1175444)
-    if selinuxenabled; then
-        sudo setenforce 0
-    fi
-
-    # The following workarounds break xenserver
-    if [ "$VIRT_DRIVER" != 'xenserver' ]; then
-        # An old version of ``python-crypto`` (2.0.1) may be installed on a
-        # fresh system via Anaconda and the dependency chain
-        # ``cas`` -> ``python-paramiko`` -> ``python-crypto``.
-        # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info``
-        #  file but leave most of the actual library files behind in
-        # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto``
-        # will install over the packaged files resulting
-        # in a useless mess of old, rpm-packaged files and pip-installed files.
-        # Remove the package so that ``pip install python-crypto`` installs
-        # cleanly.
-        # Note: other RPM packages may require ``python-crypto`` as well.
-        # For example, RHEL6 does not install ``python-paramiko packages``.
-        uninstall_package python-crypto
-
-        # A similar situation occurs with ``python-lxml``, which is required by
-        # ``ipa-client``, an auditing package we don't care about.  The
-        # build-dependencies needed for ``pip install lxml`` (``gcc``,
-        # ``libxml2-dev`` and ``libxslt-dev``) are present in
-        # ``files/rpms/general``.
-        uninstall_package python-lxml
-    fi
-
-    # If the ``dbus`` package was installed by DevStack dependencies the
-    # uuid may not be generated because the service was never started (PR#598200),
-    # causing Nova to stop later on complaining that ``/var/lib/dbus/machine-id``
-    # does not exist.
-    sudo service messagebus restart
-
-    # ``setup.py`` contains a ``setup_requires`` package that is supposed
-    # to be transient.  However, RHEL6 distribute has a bug where
-    # ``setup_requires`` registers entry points that are not cleaned
-    # out properly after the setup-phase resulting in installation failures
-    # (bz#924038).  Pre-install the problem package so the ``setup_requires``
-    # dependency is satisfied and it will not be installed transiently.
-    # Note we do this before the track-depends below.
-    pip_install hgtools
-
-    # RHEL6's version of ``python-nose`` is incompatible with Tempest.
-    # Install nose 1.1 (Tempest-compatible) from EPEL
-    install_package python-nose1.1
-    # Add a symlink for the new nosetests to allow tox for Tempest to
-    # work unmolested.
-    sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests
-fi
+# Do the ugly hacks for broken packages and distros
+$TOP_DIR/tools/fixup_stuff.sh
 
 install_rpc_backend
 
@@ -761,6 +721,12 @@
     configure_heat
 fi
 
+if is_service_enabled trove; then
+    install_trove
+    install_troveclient
+    cleanup_trove
+fi
+
 if is_service_enabled tls-proxy; then
     configure_CA
     init_CA
@@ -769,6 +735,11 @@
     # don't be naive and add to existing line!
 fi
 
+if is_service_enabled ir-api ir-cond; then
+    install_ironic
+    configure_ironic
+fi
+
 if [[ $TRACK_DEPENDS = True ]]; then
     $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
     if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
@@ -896,6 +867,10 @@
     create_cinder_accounts
     create_neutron_accounts
 
+    if is_service_enabled trove; then
+        create_trove_accounts
+    fi
+
     if is_service_enabled swift || is_service_enabled s-proxy; then
         create_swift_accounts
     fi
@@ -937,6 +912,15 @@
     init_glance
 fi
 
+# Ironic
+# ------
+
+if is_service_enabled ir-api ir-cond; then
+    echo_summary "Configuring Ironic"
+    init_ironic
+fi
+
+
 
 # Neutron
 # -------
@@ -1004,6 +988,10 @@
     init_cinder
 fi
 
+
+# Compute Service
+# ---------------
+
 if is_service_enabled nova; then
     echo_summary "Configuring Nova"
     # Rebuild the config file from scratch
@@ -1018,10 +1006,15 @@
     fi
 
 
+    if [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+        # Configure hypervisor plugin
+        configure_nova_hypervisor
+
+
     # XenServer
     # ---------
 
-    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+    elif [ "$VIRT_DRIVER" = 'xenserver' ]; then
         echo_summary "Using XenServer virtualization driver"
         if [ -z "$XENAPI_CONNECTION_URL" ]; then
             die $LINENO "XENAPI_CONNECTION_URL is not specified"
@@ -1177,6 +1170,12 @@
     start_glance
 fi
 
+# Launch the Ironic services
+if is_service_enabled ir-api ir-cond; then
+    echo_summary "Starting Ironic"
+    start_ironic
+fi
+
 # Create an access key and secret key for nova ec2 register image
 if is_service_enabled key && is_service_enabled swift3 && is_service_enabled nova; then
     NOVA_USER_ID=$(keystone user-list | grep ' nova ' | get_field 1)
@@ -1248,6 +1247,18 @@
     start_heat
 fi
 
+# Configure and launch the trove service api, and taskmanager
+if is_service_enabled trove; then
+    # Initialize trove
+    echo_summary "Configuring Trove"
+    configure_troveclient
+    configure_trove
+    init_trove
+
+    # Start the trove API and trove taskmgr components
+    echo_summary "Starting Trove"
+    start_trove
+fi
 
 # Create account rc files
 # =======================
diff --git a/stackrc b/stackrc
index c81906a..3a338d1 100644
--- a/stackrc
+++ b/stackrc
@@ -96,6 +96,10 @@
 HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
 HORIZON_BRANCH=${HORIZON_BRANCH:-master}
 
+# baremetal provisioning service
+IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git}
+IRONIC_BRANCH=${IRONIC_BRANCH:-master}
+
 # unified auth system (manages accounts/tokens)
 KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
 KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
@@ -177,6 +181,13 @@
 SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
 SPICE_BRANCH=${SPICE_BRANCH:-master}
 
+# trove service
+TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git}
+TROVE_BRANCH=${TROVE_BRANCH:-master}
+
+# trove client library
+TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git}
+TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master}
 
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
@@ -275,6 +286,9 @@
 # Set default screen name
 SCREEN_NAME=${SCREEN_NAME:-stack}
 
+# Do not install packages tagged with 'testonly' by default
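+# (set ``INSTALL_TESTONLY_PACKAGES=True`` in ``localrc`` to pull them in)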
+INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False}
+
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/tests/functions.sh b/tests/functions.sh
index 27a6cfe..7d486d4 100755
--- a/tests/functions.sh
+++ b/tests/functions.sh
@@ -367,3 +367,25 @@
 else
     echo "is_package_installed() on non-existing package failed"
 fi
+
+# test against removed package...was a bug on Ubuntu
+if is_ubuntu; then
+    PKG=cowsay
+    if ! (dpkg -s $PKG >/dev/null 2>&1); then
+        # it was never installed...set up the condition
+        sudo apt-get install -y cowsay >/dev/null 2>&1
+    fi
+    if (dpkg -s $PKG >/dev/null 2>&1); then
+        # remove it to create the 'un' status
+        sudo dpkg -P $PKG >/dev/null 2>&1
+    fi
+
+    # now test the installed check on a deleted package
+    is_package_installed $PKG
+    VAL=$?
+    if [[ "$VAL" -ne 0 ]]; then
+        echo "OK"
+    else
+        echo "is_package_installed() on deleted package failed"
+    fi
+fi
diff --git a/tools/docker/README.md b/tools/docker/README.md
new file mode 100644
index 0000000..976111f
--- /dev/null
+++ b/tools/docker/README.md
@@ -0,0 +1,13 @@
+# DevStack on Docker
+
+Using Docker as Nova's hypervisor requires two steps:
+
+* Configure DevStack by adding the following to `localrc`:
+
+    VIRT_DRIVER=docker
+
+* Download and install the Docker service and images:
+
+    tools/docker/install_docker.sh
+
+After this, `stack.sh` should run as normal.
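+
+For example, a minimal `localrc` might look like the following (passwords are
+illustrative):
+
+    ADMIN_PASSWORD=secret
+    DATABASE_PASSWORD=secret
+    RABBIT_PASSWORD=secret
+    SERVICE_PASSWORD=secret
+    SERVICE_TOKEN=token
+    VIRT_DRIVER=docker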
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
new file mode 100755
index 0000000..d659ad1
--- /dev/null
+++ b/tools/docker/install_docker.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+
+# **install_docker.sh** - Do the initial Docker installation and configuration
+
+# install_docker.sh
+#
+# Install docker package and images
+# * downloads a base busybox image and the Docker registry image if necessary
+# * installs the images in Docker's image cache
+
+
+# Keep track of the current directory
+SCRIPT_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $SCRIPT_DIR/../..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Load local configuration
+source $TOP_DIR/stackrc
+
+FILES=$TOP_DIR/files
+
+# Get our defaults
+source $TOP_DIR/lib/nova_plugins/hypervisor-docker
+
+SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
+
+
+# Install Docker Service
+# ======================
+
+# Stop the auto-repo updates and do it when required here
+NO_UPDATE_REPOS=True
+
+# Set up home repo
+curl https://get.docker.io/gpg | sudo apt-key add -
+install_package python-software-properties && \
+    sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
+apt_get update
+install_package --force-yes lxc-docker=${DOCKER_PACKAGE_VERSION}
+
+# Start the daemon - restart just in case the package ever auto-starts...
+restart_service docker
+
+echo "Waiting for docker daemon to start..."
+DOCKER_GROUP=$(groups | cut -d' ' -f1)
+CONFIGURE_CMD="while ! /bin/echo -e 'GET /v1.3/version HTTP/1.0\n\n' | socat - unix-connect:$DOCKER_UNIX_SOCKET | grep -q '200 OK'; do
+    # Set the right group on docker unix socket before retrying
+    sudo chgrp $DOCKER_GROUP $DOCKER_UNIX_SOCKET
+    sudo chmod g+rw $DOCKER_UNIX_SOCKET
+    sleep 1
+done"
+if ! timeout $SERVICE_TIMEOUT sh -c "$CONFIGURE_CMD"; then
+    die $LINENO "docker did not start"
+fi
+
+
+# Get Docker image
+if [[ ! -r $FILES/docker-ut.tar.gz ]]; then
+    (cd $FILES; curl -OR $DOCKER_IMAGE)
+fi
+if [[ ! -r $FILES/docker-ut.tar.gz ]]; then
+    die $LINENO "Docker image unavailable"
+fi
+docker import - $DOCKER_IMAGE_NAME <$FILES/docker-ut.tar.gz
+
+# Get Docker registry image
+if [[ ! -r $FILES/docker-registry.tar.gz ]]; then
+    (cd $FILES; curl -OR $DOCKER_REGISTRY_IMAGE)
+fi
+if [[ ! -r $FILES/docker-registry.tar.gz ]]; then
+    die $LINENO "Docker registry image unavailable"
+fi
+docker import - $DOCKER_REGISTRY_IMAGE_NAME <$FILES/docker-registry.tar.gz
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
new file mode 100755
index 0000000..371b25f
--- /dev/null
+++ b/tools/fixup_stuff.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+# **fixup_stuff.sh**
+
+# fixup_stuff.sh
+#
+# All distro and package specific hacks go in here
+# - prettytable 0.7.2 permissions are 600 in the package and
+#   pip 1.4 doesn't fix it (1.3 did)
+# - httplib2 0.8 permissions are 600 in the package and
+#   pip 1.4 doesn't fix it (1.3 did)
+# - RHEL6:
+#   - set selinux not enforcing
+#   - (re)start messagebus daemon
+#   - remove distro packages python-crypto and python-lxml
+#   - pre-install hgtools to work around a bug in RHEL6 distribute
+#   - install nose 1.1 from EPEL
+
+
+# Keep track of the current directory
+TOOLS_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $TOOLS_DIR/..; pwd)
+
+# Change dir to top of devstack
+cd $TOP_DIR
+
+# Import common functions
+source $TOP_DIR/functions
+
+FILES=$TOP_DIR/files
+
+
+# Python Packages
+# ---------------
+
+# Pre-install affected packages so we can fix the permissions
+sudo pip install prettytable
+sudo pip install httplib2
+
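+# Find every site-packages directory known to the default python and make the
+# affected package files readable in each one.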
+SITE_DIRS=$(python -c "import site; import os; print os.linesep.join(site.getsitepackages())")
+for dir in $SITE_DIRS; do
+
+    # Fix prettytable 0.7.2 permissions
+    if [[ -r $dir/prettytable.py ]]; then
+        sudo chmod +r $dir/prettytable-0.7.2*/*
+    fi
+
+    # Fix httplib2 0.8 permissions
+    httplib_dir=httplib2-0.8.egg-info
+    if [[ -d $dir/$httplib_dir ]]; then
+        sudo chmod +r $dir/$httplib_dir/*
+    fi
+
+done
+
+
+# RHEL6
+# -----
+
+if [[ $DISTRO =~ (rhel6) ]]; then
+
+    # Disable selinux to avoid configuring to allow Apache access
+    # to Horizon files or run nodejs (LP#1175444)
+    # FIXME(dtroyer): see if this can be skipped without node or if Horizon is not enabled
+    if selinuxenabled; then
+        sudo setenforce 0
+    fi
+
+    # If the ``dbus`` package was installed as a DevStack dependency, the
+    # machine uuid may not have been generated because the service was never
+    # started (PR#598200); Nova then fails later, complaining that
+    # ``/var/lib/dbus/machine-id`` does not exist.
+    sudo service messagebus restart
+
+    # The following workarounds break xenserver
+    if [ "$VIRT_DRIVER" != 'xenserver' ]; then
+        # An old version of ``python-crypto`` (2.0.1) may be installed on a
+        # fresh system via Anaconda and the dependency chain
+        # ``cas`` -> ``python-paramiko`` -> ``python-crypto``.
+        # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info``
+        #  file but leave most of the actual library files behind in
+        # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto``
+        # will install over the packaged files resulting
+        # in a useless mess of old, rpm-packaged files and pip-installed files.
+        # Remove the package so that ``pip install python-crypto`` installs
+        # cleanly.
+        # Note: other RPM packages may require ``python-crypto`` as well;
+        # for example, RHEL6 does not install the ``python-paramiko`` package.
+        uninstall_package python-crypto
+
+        # A similar situation occurs with ``python-lxml``, which is required by
+        # ``ipa-client``, an auditing package we don't care about.  The
+        # build-dependencies needed for ``pip install lxml`` (``gcc``,
+        # ``libxml2-dev`` and ``libxslt-dev``) are present in
+        # ``files/rpms/general``.
+        uninstall_package python-lxml
+    fi
+
+    # ``setup.py`` contains a ``setup_requires`` package that is supposed
+    # to be transient.  However, RHEL6 distribute has a bug where
+    # ``setup_requires`` registers entry points that are not cleaned
+    # out properly after the setup-phase resulting in installation failures
+    # (bz#924038).  Pre-install the problem package so the ``setup_requires``
+    # dependency is satisfied and it will not be installed transiently.
+    # Note we do this before the track-depends in ``stack.sh``.
+    pip_install hgtools
+
+
+    # RHEL6's version of ``python-nose`` is incompatible with Tempest.
+    # Install nose 1.1 (Tempest-compatible) from EPEL
+    install_package python-nose1.1
+    # Add a symlink for the new nosetests to allow tox for Tempest to
+    # work unmolested.
+    sudo ln -sf /usr/bin/nosetests1.1 /usr/local/bin/nosetests
+
+fi
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 64cc200..fc1c195 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -65,10 +65,12 @@
 }
 
 function install_pip_tarball() {
-    curl -O $PIP_TAR_URL
-    tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz
-    cd pip-$INSTALL_PIP_VERSION
-    sudo python setup.py install
+    (cd $FILES; \
+        curl -O $PIP_TAR_URL; \
+        tar xvfz pip-$INSTALL_PIP_VERSION.tar.gz; \
+        cd pip-$INSTALL_PIP_VERSION; \
+        sudo python setup.py install; \
+    )
 }
 
 # Show starting versions
diff --git a/tools/xen/README.md b/tools/xen/README.md
index af54d72..06192ed 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,48 +1,54 @@
-# Getting Started With XenServer 5.6 and Devstack
-The purpose of the code in this directory it to help developers bootstrap
-a XenServer 5.6 (or greater) + Openstack development environment.  This file gives
-some pointers on how to get started.
+# Getting Started With XenServer and Devstack
 
-Xenserver is a Type 1 hypervisor, so it needs to be installed on bare metal.
-The Openstack services are configured to run within a "privileged" virtual
-machine on the Xenserver host (called OS domU). The VM uses the XAPI toolstack
-to communicate with the host.
+The purpose of the code in this directory is to help developers bootstrap a
+XenServer 6.2 (older versions may also work) + OpenStack development
+environment. This file gives some pointers on how to get started.
+
+XenServer is a Type 1 hypervisor, so it is best installed on bare metal. The
+OpenStack services are configured to run within a virtual machine (called OS
+domU) on the XenServer host. The VM uses the XAPI toolstack to communicate with
+the host over a network connection (see `MGT_BRIDGE_OR_NET_NAME`).
 
 The provided localrc helps to build a basic environment.
-The requirements are:
+
+## Introduction
+
+### Requirements
+
  - An internet-enabled network with a DHCP server on it
  - XenServer box plugged in to the same network
 This network will be used as the OpenStack management network. The VM Network
 and the Public Network will not be connected to any physical interfaces, only
 new virtual networks will be created by the `install_os_domU.sh` script.
 
-Steps to follow:
+### Steps to follow
+
  - Install XenServer
  - Download Devstack to XenServer
  - Customise `localrc`
  - Start `install_os_domU.sh` script
 
+### Brief explanation
+
 The `install_os_domU.sh` script will:
  - Setup XenAPI plugins
  - Create the named networks, if they don't exist
- - Preseed-Netinstall an Ubuntu Virtual Machine, with 1 network interface:
-   - eth0 - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to
-   `MGT_BRIDGE_OR_NET_NAME`
+ - Preseed-Netinstall an Ubuntu Virtual Machine (NOTE: you can save and reuse
+   it, see [Reuse the Ubuntu VM](#reuse-the-ubuntu-vm)), with 1 network
+   interface:
+   - `eth0` - Connected to `UBUNTU_INST_BRIDGE_OR_NET_NAME`, defaults to
+     `MGT_BRIDGE_OR_NET_NAME`
  - After the Ubuntu install process finished, the network configuration is
  modified to:
-   - eth0 - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`
-   - eth1 - VM interface, connected to `VM_BRIDGE_OR_NET_NAME`
-   - eth2 - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME`
-   - (eth3) - Optional network interface if neutron is used, to enforce xapi to
-   create the underlying bridge.
+   - `eth0` - Management interface, connected to `MGT_BRIDGE_OR_NET_NAME`. Xapi
+     must be accessible through this network.
+   - `eth1` - VM interface, connected to `VM_BRIDGE_OR_NET_NAME`
+   - `eth2` - Public interface, connected to `PUB_BRIDGE_OR_NET_NAME`
  - Start devstack inside the created OpenStack VM
 
 ## Step 1: Install Xenserver
-Install XenServer 5.6+ on a clean box. You can get XenServer by signing
-up for an account on citrix.com, and then visiting:
-https://www.citrix.com/English/ss/downloads/details.asp?downloadId=2311504&productId=683148
-
-For details on installation, see: http://wiki.openstack.org/XenServer/Install
+Install XenServer on a clean box. You can download the latest XenServer for
+free from: http://www.xenserver.org/
 
 The XenServer IP configuration depends on your local network setup. If you are
 using dhcp, make a reservation for XenServer, so its IP address won't change
@@ -85,17 +91,20 @@
     XENAPI_CONNECTION_URL="http://address_of_your_xenserver"
     VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver
 
-    # Do not download the usual images
-    IMAGE_URLS=""
-    # Explicitly set virt driver here
+    # Download a vhd and a uec image
+    IMAGE_URLS="\
+    https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\
+    http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"
+
+    # Explicitly set virt driver
     VIRT_DRIVER=xenserver
-    # Explicitly enable multi-host
+
+    # Explicitly enable multi-host for nova-network HA
     MULTI_HOST=1
+
     # Give extra time for boot
     ACTIVE_TIMEOUT=45
 
-    # NOTE: the value of FLAT_NETWORK_BRIDGE will automatically be determined
-    # by install_os_domU.sh script.
     EOF
 
 ## Step 4: Run `./install_os_domU.sh` from the `tools/xen` directory
@@ -107,12 +116,60 @@
 installed and tail the run.sh.log file. You will need to wait until it run.sh
 has finished executing.
 
-## Step 5: Do cloudy stuff!
-* Play with horizon
-* Play with the CLI
-* Log bugs to devstack and core projects, and submit fixes!
+# Appendix
 
-## Step 6: Run from snapshot
-If you want to quicky re-run devstack from a clean state,
-using the same settings you used in your previous run,
-you can revert the DomU to the snapshot called `before_first_boot`
+This section contains useful information for running DevStack in CI
+environments and for using Ubuntu network mirrors.
+
+## Use a specific Ubuntu mirror for installation
+
+To speed up the Ubuntu installation, you can use a specific mirror. To specify
+a mirror explicitly, include the following settings in your `localrc` file:
+
+    UBUNTU_INST_HTTP_HOSTNAME="archive.ubuntu.com"
+    UBUNTU_INST_HTTP_DIRECTORY="/ubuntu"
+
+These variables set the `mirror/http/hostname` and `mirror/http/directory`
+settings in the Ubuntu preseed file. The minimal Ubuntu VM will use the
+specified parameters.
+
+## Use an http proxy to speed up Ubuntu installation
+
+To further speed up the Ubuntu VM and package installation, an internal http
+proxy can be used. `squid-deb-proxy` has proven to be stable. To use an http
+proxy, specify:
+
+    UBUNTU_INST_HTTP_PROXY="http://ubuntu-proxy.somedomain.com:8000"
+
+in your `localrc` file.
+
+## Reuse the Ubuntu VM
+
+Performing a minimal Ubuntu installation can take a lot of time, depending on
+your mirror/network speed. If you run the `install_os_domU.sh` script on a clean
+hypervisor, you can speed up the installation by re-using the Ubuntu VM from
+a previous installation.
+
+### Export the Ubuntu VM to an XVA
+
+Given you have an NFS export `TEMPLATE_NFS_DIR`:
+
+    TEMPLATE_FILENAME=devstack-jeos.xva
+    TEMPLATE_NAME=jeos_template_for_devstack
+    mountdir=$(mktemp -d)
+    mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir"
+    VM="$(xe template-list name-label="$TEMPLATE_NAME" --minimal)"
+    xe template-export template-uuid=$VM filename="$mountdir/$TEMPLATE_FILENAME"
+    umount "$mountdir"
+    rm -rf "$mountdir"
+
+### Import the Ubuntu VM
+
+Given you have an NFS export `TEMPLATE_NFS_DIR` where you exported the Ubuntu
+VM as `TEMPLATE_FILENAME`:
+
+    mountdir=$(mktemp -d)
+    mount -t nfs "$TEMPLATE_NFS_DIR" "$mountdir"
+    xe vm-import filename="$mountdir/$TEMPLATE_FILENAME"
+    umount "$mountdir"
+    rm -rf "$mountdir"
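+
+After the import, the template should be available under the name-label it was
+exported with, e.g.:
+
+    xe template-list name-label="jeos_template_for_devstack" --minimal
+
+`install_os_domU.sh` looks for a template with this name and, if one exists,
+re-uses it instead of performing the Ubuntu net-install again.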
diff --git a/tools/xen/functions b/tools/xen/functions
index 7616a5f..a5c4b70 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -268,3 +268,22 @@
 
     xe network-attach uuid=$net host-uuid=$host
 }
+
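+# Set a VM's static and dynamic memory limits (min and max) to the same fixed
+# value, given the VM's name-label and the amount of memory in MiB.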
+function set_vm_memory() {
+    local vm_name_label
+    local memory
+
+    vm_name_label="$1"
+    memory="$2"
+
+    local vm
+
+    vm=$(_vm_uuid "$vm_name_label")
+
+    xe vm-memory-limits-set \
+        static-min=${memory}MiB \
+        static-max=${memory}MiB \
+        dynamic-min=${memory}MiB \
+        dynamic-max=${memory}MiB \
+        uuid=$vm
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index e762f6d..b49504d 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -29,6 +29,10 @@
 # xapi functions
 . $THIS_DIR/functions
 
+# Determine what system we are running on.
+# Might not be XenServer if we're using xenserver-core
+GetDistro
+
 #
 # Get Settings
 #
@@ -167,8 +171,8 @@
 #
 
 GUEST_NAME=${GUEST_NAME:-"DevStackOSDomU"}
-TNAME="devstack_template"
-SNAME_PREPARED="template_prepared"
+TNAME="jeos_template_for_devstack"
+SNAME_TEMPLATE="jeos_snapshot_for_devstack"
 SNAME_FIRST_BOOT="before_first_boot"
 
 function wait_for_VM_to_halt() {
@@ -177,7 +181,7 @@
     mgmt_ip=$(echo $XENAPI_CONNECTION_URL | tr -d -c '1234567890.')
     domid=$(xe vm-list name-label="$GUEST_NAME" params=dom-id minimal=true)
     port=$(xenstore-read /local/domain/$domid/console/vnc-port)
-    echo "vncviewer -via $mgmt_ip localhost:${port:2}"
+    echo "vncviewer -via root@$mgmt_ip localhost:${port:2}"
     while true
     do
         state=$(xe_min vm-list name-label="$GUEST_NAME" power-state=halted)
@@ -224,8 +228,11 @@
     $THIS_DIR/scripts/install-os-vpx.sh \
         -t "$UBUNTU_INST_TEMPLATE_NAME" \
         -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \
-        -l "$GUEST_NAME" \
-        -r "$OSDOMU_MEM_MB"
+        -l "$GUEST_NAME"
+
+    set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB"
+
+    xe vm-start vm="$GUEST_NAME"
 
     # wait for install to finish
     wait_for_VM_to_halt
@@ -234,21 +241,8 @@
     vm_uuid=$(xe_min vm-list name-label="$GUEST_NAME")
     xe vm-param-set actions-after-reboot=Restart uuid="$vm_uuid"
 
-    #
-    # Prepare VM for DevStack
-    #
-
-    # Install XenServer tools, and other such things
-    $THIS_DIR/prepare_guest_template.sh "$GUEST_NAME"
-
-    # start the VM to run the prepare steps
-    xe vm-start vm="$GUEST_NAME"
-
-    # Wait for prep script to finish and shutdown system
-    wait_for_VM_to_halt
-
     # Make template from VM
-    snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_PREPARED")
+    snuuid=$(xe vm-snapshot vm="$GUEST_NAME" new-name-label="$SNAME_TEMPLATE")
     xe snapshot-clone uuid=$snuuid new-name-label="$TNAME"
 else
     #
@@ -257,6 +251,22 @@
     vm_uuid=$(xe vm-install template="$TNAME" new-name-label="$GUEST_NAME")
 fi
 
+#
+# Prepare VM for DevStack
+#
+
+# Install XenServer tools, and other such things
+$THIS_DIR/prepare_guest_template.sh "$GUEST_NAME"
+
+# Set virtual machine parameters
+set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB"
+
+# start the VM to run the prepare steps
+xe vm-start vm="$GUEST_NAME"
+
+# Wait for prep script to finish and shutdown system
+wait_for_VM_to_halt
+
 ## Setup network cards
 # Wipe out all
 destroy_all_vifs_of "$GUEST_NAME"
diff --git a/tools/xen/prepare_guest.sh b/tools/xen/prepare_guest.sh
index f109d72..6ec5ffa 100755
--- a/tools/xen/prepare_guest.sh
+++ b/tools/xen/prepare_guest.sh
@@ -48,6 +48,11 @@
 echo $STACK_USER:$GUEST_PASSWORD | chpasswd
 echo "$STACK_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
 
+# Add a udev rule so that new block devices can be written by the stack user
+cat > /etc/udev/rules.d/50-openstack-blockdev.rules << EOF
+KERNEL=="xvd[b-z]", GROUP="$STACK_USER", MODE="0660"
+EOF
+
 # Give ownership of /opt/stack to stack user
 chown -R $STACK_USER /opt/stack
 
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 8ee8b67..c94a593 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -20,8 +20,6 @@
 set -eux
 
 BRIDGE=
-RAM=
-BALLOONING=
 NAME_LABEL=
 TEMPLATE_NAME=
 
@@ -29,7 +27,7 @@
 {
 cat << EOF
 
-  Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE] [-r RAM] [-b] 
+  Usage: $0 -t TEMPLATE_NW_INSTALL -l NAME_LABEL [-n BRIDGE]
 
   Install a VM from a template
 
@@ -37,9 +35,6 @@
 
      -h           Shows this message.
      -t template  VM template to use
-     -b           Enable memory ballooning. When set min_RAM=RAM/2 max_RAM=RAM.
-     -r MiB       Specifies RAM used by the VPX, in MiB.
-                  By default it will take the value from the XVA.
      -l name      Specifies the name label for the VM.
      -n bridge    The bridge/network to use for eth0. Defaults to xenbr0
 EOF
@@ -53,12 +48,6 @@
       h) usage
          exit 1
          ;;
-      b)
-         BALLOONING=1
-         ;;
-      r)
-         RAM=$OPTARG
-         ;;
       n)
          BRIDGE=$OPTARG
          ;;
@@ -119,19 +108,6 @@
 }
 
 
-set_memory()
-{
-  local v="$1"
-  if [ "$RAM" != "" ]
-  then
-    echo "Setting RAM to $RAM MiB."
-    [ "$BALLOONING" == 1 ] && RAM_MIN=$(($RAM / 2)) || RAM_MIN=$RAM
-    xe vm-memory-limits-set static-min=16MiB static-max=${RAM}MiB \
-                            dynamic-min=${RAM_MIN}MiB dynamic-max=${RAM}MiB \
-                            uuid="$v"
-  fi
-}
-
 
 # Make the VM auto-start on server boot.
 set_auto_start()
@@ -161,5 +137,3 @@
 create_vif "$vm_uuid"
 xe vm-param-set other-config:os-vpx=true uuid="$vm_uuid"
 xe vm-param-set actions-after-reboot=Destroy uuid="$vm_uuid"
-set_memory "$vm_uuid"
-xe vm-start uuid=$vm_uuid
diff --git a/tools/xen/xenrc b/tools/xen/xenrc
index 03b30ac..f698be1 100644
--- a/tools/xen/xenrc
+++ b/tools/xen/xenrc
@@ -13,7 +13,7 @@
 
 # Size of image
 VDI_MB=${VDI_MB:-5000}
-OSDOMU_MEM_MB=1024
+OSDOMU_MEM_MB=2048
 OSDOMU_VDI_GB=8
 
 # Network mapping. Specify bridge names or network names. Network names may
diff --git a/unstack.sh b/unstack.sh
index 2268b90..05d9fb7 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -33,6 +33,8 @@
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/neutron
+source $TOP_DIR/lib/ironic
+source $TOP_DIR/lib/trove
 
 # Determine what system we are running on.  This provides ``os_VENDOR``,
 # ``os_RELEASE``, ``os_UPDATE``, ``os_PACKAGE``, ``os_CODENAME``
@@ -65,12 +67,26 @@
     fi
 fi
 
+# Shut down Nova hypervisor plugins after Nova
+NOVA_PLUGINS=$TOP_DIR/lib/nova_plugins
+if is_service_enabled nova && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
+    # Load plugin
+    source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
+    stop_nova_hypervisor
+fi
+
 # Swift runs daemons
 if is_service_enabled s-proxy; then
     stop_swift
     cleanup_swift
 fi
 
+# Ironic runs daemons
+if is_service_enabled ir-api ir-cond; then
+    stop_ironic
+    cleanup_ironic
+fi
+
 # Apache has the WSGI processes
 if is_service_enabled horizon; then
     stop_horizon
@@ -115,4 +131,8 @@
     cleanup_neutron
 fi
 
+if is_service_enabled trove; then
+    cleanup_trove
+fi
+
 cleanup_tmp