Merge "add assertions for blind grep"
diff --git a/.gitignore b/.gitignore
index 798b081..a3d5b0d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,6 @@
 accrc
 .stackenv
 .prereqs
+devstack-docs-*
+docs/
+docs-files
diff --git a/HACKING.rst b/HACKING.rst
index 3c08e67..103b579 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -227,3 +227,51 @@
   or graciously handle possible artifacts left over from previous runs if executed
   again.  It is acceptable to require a reboot or even a re-install of DevStack
   to restore a clean test environment.
+
+
+Bash Style Guidelines
+~~~~~~~~~~~~~~~~~~~~~
+Devstack defines a set of best practices for maintaining large
+collections of bash scripts. These should be considered as part of the
+review process.
+
+We have a preliminary enforcing script for this called bash8 (only a
+small number of these rules are enforced).
+
+Whitespace Rules
+----------------
+
+- lines should not include trailing whitespace
+- there should be no hard tabs in the file
+- indents are 4 spaces, and all indentation should be some multiple of
+  them
+
+Control Structure Rules
+-----------------------
+- then should be on the same line as the if
+- do should be on the same line as the for
+
+Example::
+
+  if [[ -r $TOP_DIR/local.conf ]]; then
+      LRC=$(get_meta_section_files $TOP_DIR/local.conf local)
+      for lfile in $LRC; do
+          if [[ "$lfile" == "localrc" ]]; then
+              if [[ -r $TOP_DIR/localrc ]]; then
+                  warn $LINENO "localrc and local.conf:[[local]] both exist, using localrc"
+              else
+                  echo "# Generated file, do not edit" >$TOP_DIR/.localrc.auto
+                  get_meta_section $TOP_DIR/local.conf local $lfile >>$TOP_DIR/.localrc.auto
+              fi
+          fi
+      done
+  fi
+
+Variables and Functions
+-----------------------
+- functions should be used whenever possible for clarity
+- functions should use ``local`` variables as much as possible to
+  ensure they are isolated from the rest of the environment
+- local variables should be lower case, global variables should be
+  upper case
+- function names should_have_underscores, NotCamelCase.
diff --git a/README.md b/README.md
index 640fab6..91d7efb 100644
--- a/README.md
+++ b/README.md
@@ -30,7 +30,7 @@
 `stackrc` for the default set).  Usually just before a release there will be
 milestone-proposed branches that need to be tested::
 
-    GLANCE_REPO=https://github.com/openstack/glance.git
+    GLANCE_REPO=git://git.openstack.org/openstack/glance.git
     GLANCE_BRANCH=milestone-proposed
 
 # Start A Dev Cloud
@@ -82,7 +82,7 @@
 # Customizing
 
 You can override environment variables used in `stack.sh` by creating file
-name `local.conf` with a ``locarc`` section as shown below.  It is likely
+name `local.conf` with a ``localrc`` section as shown below.  It is likely
 that you will need to do this to tweak your networking configuration should
 you need to access your cloud from a different host.
 
@@ -171,6 +171,7 @@
     enable_service q-dhcp
     enable_service q-l3
     enable_service q-meta
+    enable_service q-metering
     enable_service neutron
     # Optional, to enable tempest configuration as part of DevStack
     enable_service tempest
diff --git a/driver_certs/cinder_driver_cert.sh b/driver_certs/cinder_driver_cert.sh
new file mode 100755
index 0000000..18bef8b
--- /dev/null
+++ b/driver_certs/cinder_driver_cert.sh
@@ -0,0 +1,87 @@
+#!/usr/bin/env bash
+
+# **cinder_driver_cert.sh**
+
+CERT_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $CERT_DIR/..; pwd)
+
+source $TOP_DIR/functions
+source $TOP_DIR/stackrc
+source $TOP_DIR/openrc
+source $TOP_DIR/lib/tempest
+source $TOP_DIR/lib/cinder
+
+TEMPFILE=`mktemp`
+RECLONE=True
+
+function log_message() {
+    MESSAGE=$1
+    STEP_HEADER=$2
+    if [[ "$STEP_HEADER" = "True" ]]; then
+        echo -e "\n========================================================" | tee -a $TEMPFILE
+    fi
+    echo -e `date +%m/%d/%y/%T:`"${MESSAGE}" | tee -a $TEMPFILE
+    if [[ "$STEP_HEADER" = "True" ]]; then
+        echo -e "========================================================" | tee -a $TEMPFILE
+    fi
+}
+
+if [[ "$OFFLINE" = "True" ]]; then
+    echo "ERROR: Driver cert requires fresh clone/pull from ${CINDER_BRANCH}"
+    echo "       Please set OFFLINE=False and retry."
+    exit 1
+fi
+
+log_message "RUNNING CINDER DRIVER CERTIFICATION CHECK", True
+log_message "Output is being logged to: $TEMPFILE"
+
+cd $CINDER_DIR
+log_message "Cloning to ${CINDER_REPO}...", True
+install_cinder
+
+log_message "Pull a fresh Clone of cinder repo...", True
+git status | tee -a $TEMPFILE
+git log --pretty=oneline -n 1 | tee -a $TEMPFILE
+
+log_message "Gathering copy of cinder.conf file (passwords will be scrubbed)...", True
+cat /etc/cinder/cinder.conf | egrep -v "(^#.*|^$)" | tee -a $TEMPFILE
+sed -i "s/\(.*password.*=\).*$/\1 xxx/i" $TEMPFILE
+log_message "End of cinder.conf.", True
+
+cd $TOP_DIR
+# Verify tempest is installed/enabled
+if ! is_service_enabled tempest; then
+    log_message "ERROR!!! Cert requires tempest in enabled_services!", True
+    log_message "       Please add tempest to enabled_services and retry."
+    exit 1
+fi
+
+cd $TEMPEST_DIR
+install_tempest
+
+log_message "Verify tempest is current....", True
+git status | tee -a $TEMPFILE
+log_message "Check status and get latest commit..."
+git log --pretty=oneline -n 1 | tee -a $TEMPFILE
+
+
+#stop and restart cinder services
+log_message "Restart Cinder services...", True
+stop_cinder
+sleep 1
+start_cinder
+sleep 5
+
+# run tempest api/volume/test_*
+log_message "Run the actual tempest volume tests (run_tests.sh -N tempest.api.volume.test_*)...", True
+exec 2> >(tee -a $TEMPFILE)
+./run_tests.sh -N tempest.api.volume.test_*
+if [[ $? = 0 ]]; then
+    log_message "CONGRATULATIONS!!!  Device driver PASSED!", True
+    log_message "Submit output: ($TEMPFILE)"
+    exit 0
+else
+    log_message "SORRY!!!  Device driver FAILED!", True
+    log_message "Check output in $TEMPFILE"
+    exit 1
+fi
diff --git a/eucarc b/eucarc
index 2b0f7dd..3502351 100644
--- a/eucarc
+++ b/eucarc
@@ -13,7 +13,7 @@
 fi
 
 # Find the other rc files
-RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 
 # Get user configuration
 source $RC_DIR/openrc
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 6cc81ae..1b1ac06 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -3,12 +3,13 @@
 # **aggregates.sh**
 
 # This script demonstrates how to use host aggregates:
-#  *  Create an Aggregate
-#  *  Updating Aggregate details
-#  *  Testing Aggregate metadata
-#  *  Testing Aggregate delete
-#  *  Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
-#  *  Testing add/remove hosts (with one host)
+#
+# *  Create an Aggregate
+# *  Updating Aggregate details
+# *  Testing Aggregate metadata
+# *  Testing Aggregate delete
+# *  Testing General Aggregates (https://blueprints.launchpad.net/nova/+spec/general-host-aggregates)
+# *  Testing add/remove hosts (with one host)
 
 echo "**************************************************"
 echo "Begin DevStack Exercise: $0"
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 634a6d5..3b3d3ba 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -3,8 +3,9 @@
 # **boot_from_volume.sh**
 
 # This script demonstrates how to boot from a volume.  It does the following:
-#  *  Create a bootable volume
-#  *  Boot a volume-backed instance
+#
+# *  Create a bootable volume
+# *  Boot a volume-backed instance
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
diff --git a/exercises/docker.sh b/exercises/docker.sh
deleted file mode 100755
index 10c5436..0000000
--- a/exercises/docker.sh
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env bash
-
-# **docker**
-
-# Test Docker hypervisor
-
-echo "*********************************************************************"
-echo "Begin DevStack Exercise: $0"
-echo "*********************************************************************"
-
-# This script exits on an error so that errors don't compound and you see
-# only the first error that occurred.
-set -o errexit
-
-# Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
-set -o xtrace
-
-
-# Settings
-# ========
-
-# Keep track of the current directory
-EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
-TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
-
-# Import common functions
-source $TOP_DIR/functions
-
-# Import configuration
-source $TOP_DIR/openrc
-
-# Import exercise configuration
-source $TOP_DIR/exerciserc
-
-# Skip if the hypervisor is not Docker
-[[ "$VIRT_DRIVER" == "docker" ]] || exit 55
-
-# Import docker functions and declarations
-source $TOP_DIR/lib/nova_plugins/hypervisor-docker
-
-# Image and flavor are ignored but the CLI requires them...
-
-# Instance type to create
-DEFAULT_INSTANCE_TYPE=${DEFAULT_INSTANCE_TYPE:-m1.tiny}
-
-# Boot this image, use first AMI image if unset
-DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
-
-# Instance name
-VM_NAME=ex-docker
-
-
-# Launching a server
-# ==================
-
-# Grab the id of the image to launch
-IMAGE=$(glance image-list | egrep " $DOCKER_IMAGE_NAME:latest " | get_field 1)
-die_if_not_set $LINENO IMAGE "Failure getting image $DOCKER_IMAGE_NAME"
-
-# Select a flavor
-INSTANCE_TYPE=$(nova flavor-list | grep $DEFAULT_INSTANCE_TYPE | get_field 1)
-if [[ -z "$INSTANCE_TYPE" ]]; then
-    # grab the first flavor in the list to launch if default doesn't exist
-    INSTANCE_TYPE=$(nova flavor-list | head -n 4 | tail -n 1 | get_field 1)
-fi
-
-# Clean-up from previous runs
-nova delete $VM_NAME || true
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $VM_NAME; do sleep 1; done"; then
-    die $LINENO "server didn't terminate!"
-fi
-
-# Boot instance
-# -------------
-
-VM_UUID=$(nova boot --flavor $INSTANCE_TYPE --image $IMAGE $VM_NAME | grep ' id ' | get_field 2)
-die_if_not_set $LINENO VM_UUID "Failure launching $VM_NAME"
-
-# Check that the status is active within ACTIVE_TIMEOUT seconds
-if ! timeout $ACTIVE_TIMEOUT sh -c "while ! nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    die $LINENO "server didn't become active!"
-fi
-
-# Get the instance IP
-IP=$(nova show $VM_UUID | grep "$PRIVATE_NETWORK_NAME" | get_field 2)
-die_if_not_set $LINENO IP "Failure retrieving IP address"
-
-# Private IPs can be pinged in single node deployments
-ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT
-
-# Clean up
-# --------
-
-# Delete instance
-nova delete $VM_UUID || die $LINENO "Failure deleting instance $VM_NAME"
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
-    die $LINENO "Server $VM_NAME not deleted"
-fi
-
-set +o xtrace
-echo "*********************************************************************"
-echo "SUCCESS: End DevStack Exercise: $0"
-echo "*********************************************************************"
diff --git a/exercises/savanna.sh b/exercises/savanna.sh
new file mode 100755
index 0000000..fc3f976
--- /dev/null
+++ b/exercises/savanna.sh
@@ -0,0 +1,43 @@
+#!/usr/bin/env bash
+
+# **savanna.sh**
+
+# Sanity check that Savanna started if enabled
+
+echo "*********************************************************************"
+echo "Begin DevStack Exercise: $0"
+echo "*********************************************************************"
+
+# This script exits on an error so that errors don't compound and you see
+# only the first error that occurred.
+set -o errexit
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following allowing as the install occurs.
+set -o xtrace
+
+
+# Settings
+# ========
+
+# Keep track of the current directory
+EXERCISE_DIR=$(cd $(dirname "$0") && pwd)
+TOP_DIR=$(cd $EXERCISE_DIR/..; pwd)
+
+# Import common functions
+source $TOP_DIR/functions
+
+# Import configuration
+source $TOP_DIR/openrc
+
+# Import exercise configuration
+source $TOP_DIR/exerciserc
+
+is_service_enabled savanna || exit 55
+
+curl http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Savanna API not functioning!"
+
+set +o xtrace
+echo "*********************************************************************"
+echo "SUCCESS: End DevStack Exercise: $0"
+echo "*********************************************************************"
diff --git a/exercises/swift.sh b/exercises/swift.sh
index b9f1b56..25ea671 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -2,7 +2,7 @@
 
 # **swift.sh**
 
-# Test swift via the ``swift`` command line from ``python-swiftclient`
+# Test swift via the ``swift`` command line from ``python-swiftclient``
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
diff --git a/extras.d/70-savanna.sh b/extras.d/70-savanna.sh
new file mode 100644
index 0000000..f6881cc
--- /dev/null
+++ b/extras.d/70-savanna.sh
@@ -0,0 +1,31 @@
+# savanna.sh - DevStack extras script to install Savanna
+
+if is_service_enabled savanna; then
+    if [[ "$1" == "source" ]]; then
+        # Initial source
+        source $TOP_DIR/lib/savanna
+        source $TOP_DIR/lib/savanna-dashboard
+    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
+        echo_summary "Installing Savanna"
+        install_savanna
+        if is_service_enabled horizon; then
+            install_savanna_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
+        echo_summary "Configuring Savanna"
+        configure_savanna
+        if is_service_enabled horizon; then
+            configure_savanna_dashboard
+        fi
+    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        echo_summary "Initializing Savanna"
+        start_savanna
+    fi
+
+    if [[ "$1" == "unstack" ]]; then
+        stop_savanna
+        if is_service_enabled horizon; then
+            cleanup_savanna_dashboard
+        fi
+    fi
+fi
diff --git a/files/apts/horizon b/files/apts/horizon
index 0865931..8969046 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -19,5 +19,3 @@
 python-coverage
 python-cherrypy3 # why?
 python-migrate
-nodejs
-nodejs-legacy # dist:quantal
diff --git a/files/apts/trema b/files/apts/trema
index e33ccd3..09cb7c6 100644
--- a/files/apts/trema
+++ b/files/apts/trema
@@ -6,6 +6,7 @@
 ruby1.8-dev
 libpcap-dev
 libsqlite3-dev
+libglib2.0-dev
 
 # Sliceable Switch
 sqlite3
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index 73932ac..d3bde26 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -1,6 +1,5 @@
 apache2  # NOPRIME
 apache2-mod_wsgi  # NOPRIME
-nodejs
 python-CherryPy # why? (coming from apts)
 python-Paste
 python-PasteDeploy
diff --git a/files/rpms/horizon b/files/rpms/horizon
index 0ca18ca..aa27ab4 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -3,7 +3,6 @@
 gcc
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
-nodejs # NOPRIME
 pylint
 python-anyjson
 python-BeautifulSoup
diff --git a/functions b/functions
index af5a37d..4d5b4b5 100644
--- a/functions
+++ b/functions
@@ -1,16 +1,17 @@
 # functions - Common functions used by DevStack components
 #
 # The following variables are assumed to be defined by certain functions:
-# ``ENABLED_SERVICES``
-# ``ERROR_ON_CLONE``
-# ``FILES``
-# ``GLANCE_HOSTPORT``
-# ``OFFLINE``
-# ``PIP_DOWNLOAD_CACHE``
-# ``PIP_USE_MIRRORS``
-# ``RECLONE``
-# ``TRACK_DEPENDS``
-# ``http_proxy``, ``https_proxy``, ``no_proxy``
+#
+# - ``ENABLED_SERVICES``
+# - ``ERROR_ON_CLONE``
+# - ``FILES``
+# - ``GLANCE_HOSTPORT``
+# - ``OFFLINE``
+# - ``PIP_DOWNLOAD_CACHE``
+# - ``PIP_USE_MIRRORS``
+# - ``RECLONE``
+# - ``TRACK_DEPENDS``
+# - ``http_proxy``, ``https_proxy``, ``no_proxy``
 
 
 # Save trace setting
@@ -54,7 +55,7 @@
 
 
 # Wrapper for ``apt-get`` to set cache and proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy`
+# Uses globals ``OFFLINE``, ``*_proxy``
 # apt_get operation package [package ...]
 function apt_get() {
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
@@ -260,7 +261,8 @@
 #
 # Only packages required for the services in 1st argument will be
 # included.  Two bits of metadata are recognized in the prerequisite files:
-# - ``# NOPRIME`` defers installation to be performed later in stack.sh
+#
+# - ``# NOPRIME`` defers installation to be performed later in `stack.sh`
 # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
 #   of the package to the distros listed.  The distro names are case insensitive.
 function get_packages() {
@@ -555,6 +557,18 @@
     [ "($uname -m)" = "$ARCH_TYPE" ]
 }
 
+# Checks if installed Apache is <= given version
+# $1 = x.y.z (version string of Apache)
+function check_apache_version {
+    local cmd="apachectl"
+    if ! [[ -x $(which apachectl 2>/dev/null) ]]; then
+        cmd="/usr/sbin/apachectl"
+    fi
+
+    local version=$($cmd -v | grep version | grep -Po 'Apache/\K[^ ]*')
+    expr "$version" '>=' $1 > /dev/null
+}
+
 # git clone only if directory doesn't exist already.  Since ``DEST`` might not
 # be owned by the installation user, we create the directory and change the
 # ownership to the proper user.
@@ -827,6 +841,7 @@
         [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && return 0
+        [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && return 0
         [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && return 0
         [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && return 0
         [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && return 0
@@ -982,7 +997,7 @@
 
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``PIP_USE_MIRRORS``,
-#   ``TRACK_DEPENDS``, ``*_proxy`
+# ``TRACK_DEPENDS``, ``*_proxy``
 # pip_install package [package ...]
 function pip_install {
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
@@ -1011,8 +1026,7 @@
     # /tmp/$USER-pip-build.  Even if a later component specifies foo <
     # 1.1, the existing extracted build will be used and cause
     # confusing errors.  By creating unique build directories we avoid
-    # this problem. See
-    #  https://github.com/pypa/pip/issues/709
+    # this problem. See https://github.com/pypa/pip/issues/709
     local pip_build_tmp=$(mktemp --tmpdir -d pip-build.XXXXX)
 
     $SUDO_PIP PIP_DOWNLOAD_CACHE=${PIP_DOWNLOAD_CACHE:-/var/cache/pip} \
@@ -1146,8 +1160,8 @@
 }
 
 
-# Helper to remove the *.failure files under $SERVICE_DIR/$SCREEN_NAME
-# This is used for service_check when all the screen_it are called finished
+# Helper to remove the ``*.failure`` files under ``$SERVICE_DIR/$SCREEN_NAME``.
+# This is used for ``service_check`` when all the ``screen_it`` are called finished
 # init_service_check
 function init_service_check() {
     SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -1237,7 +1251,11 @@
 
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
-# Uses globals ``STACK_USER``, ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``
+#
+# Updates the dependencies in project_dir from the
+# openstack/requirements global list before installing anything.
+#
+# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``
 # setup_develop directory
 function setup_develop() {
     local project_dir=$1
@@ -1253,14 +1271,33 @@
             $SUDO_CMD python update.py $project_dir)
     fi
 
+    setup_develop_no_requirements_update $project_dir
+
+    # We've just gone and possibly modified the user's source tree in an
+    # automated way, which is considered bad form if it's a development
+    # tree because we've screwed up their next git checkin. So undo it.
+    #
+    # However... there are some circumstances, like running in the gate
+    # where we really really want the overridden version to stick. So provide
+    # a variable that tells us whether or not we should UNDO the requirements
+    # changes (this will be set to False in the OpenStack ci gate)
+    if [ "$UNDO_REQUIREMENTS" = "True" ]; then
+        if [ $update_requirements -eq 0 ]; then
+            (cd $project_dir && git reset --hard)
+        fi
+    fi
+}
+
+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running `setup.py develop`
+# Uses globals ``STACK_USER``
+# setup_develop_no_requirements_update directory
+function setup_develop_no_requirements_update() {
+    local project_dir=$1
+
     pip_install -e $project_dir
     # ensure that further actions can do things like setup.py sdist
     safe_chown -R $STACK_USER $1/*.egg-info
-
-    # Undo requirements changes, if we made them
-    if [ $update_requirements -eq 0 ]; then
-        (cd $project_dir && git checkout -- requirements.txt test-requirements.txt setup.py)
-    fi
 }
 
 
@@ -1301,10 +1338,12 @@
 }
 
 
-# Retrieve an image from a URL and upload into Glance
+# Retrieve an image from a URL and upload into Glance.
 # Uses the following variables:
-#   ``FILES`` must be set to the cache dir
-#   ``GLANCE_HOSTPORT``
+#
+# - ``FILES`` must be set to the cache dir
+# - ``GLANCE_HOSTPORT``
+#
 # upload_image image-url glance-token
 function upload_image() {
     local image_url=$1
@@ -1312,12 +1351,24 @@
 
     # Create a directory for the downloaded image tarballs.
     mkdir -p $FILES/images
-
-    # Downloads the image (uec ami+aki style), then extracts it.
     IMAGE_FNAME=`basename "$image_url"`
-    if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
-        wget -c $image_url -O $FILES/$IMAGE_FNAME
-        if [[ $? -ne 0 ]]; then
+    if [[ $image_url != file* ]]; then
+        # Downloads the image (uec ami+aki style), then extracts it.
+        if [[ ! -f $FILES/$IMAGE_FNAME || "$(stat -c "%s" $FILES/$IMAGE_FNAME)" = "0" ]]; then
+             wget -c $image_url -O $FILES/$IMAGE_FNAME
+             if [[ $? -ne 0 ]]; then
+                 echo "Not found: $image_url"
+                 return
+             fi
+        fi
+        IMAGE="$FILES/${IMAGE_FNAME}"
+    else
+        # File based URL (RFC 1738): file://host/path
+        # Remote files are not considered here.
+        # *nix: file:///home/user/path/file
+        # windows: file:///C:/Documents%20and%20Settings/user/path/file
+        IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+        if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
             echo "Not found: $image_url"
             return
         fi
@@ -1325,7 +1376,6 @@
 
     # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading
     if [[ "$image_url" =~ 'openvz' ]]; then
-        IMAGE="$FILES/${IMAGE_FNAME}"
         IMAGE_NAME="${IMAGE_FNAME%.tar.gz}"
         glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format ami --disk-format ami < "${IMAGE}"
         return
@@ -1333,26 +1383,130 @@
 
     # vmdk format images
     if [[ "$image_url" =~ '.vmdk' ]]; then
-        IMAGE="$FILES/${IMAGE_FNAME}"
         IMAGE_NAME="${IMAGE_FNAME%.vmdk}"
 
         # Before we can upload vmdk type images to glance, we need to know it's
         # disk type, storage adapter, and networking adapter. These values are
-        # passed to glance as custom properties. We take these values from the
+        # passed to glance as custom properties.
+        # We take these values from the vmdk file if populated. Otherwise, we use
         # vmdk filename, which is expected in the following format:
         #
-        #     <name>-<disk type>:<storage adapter>:<network adapter>
+        #     <name>-<disk type>;<storage adapter>;<network adapter>
         #
         # If the filename does not follow the above format then the vsphere
         # driver will supply default values.
-        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+:.+:.+$'`
-        if [[ ! -z "$property_string" ]]; then
-            IFS=':' read -a props <<< "$property_string"
-            vmdk_disktype="${props[0]}"
-            vmdk_adapter_type="${props[1]}"
-            vmdk_net_adapter="${props[2]}"
+
+        vmdk_adapter_type=""
+        vmdk_disktype=""
+        vmdk_net_adapter=""
+
+        # vmdk adapter type
+        vmdk_adapter_type="$(head -25 $IMAGE | grep -a -F -m 1 'ddb.adapterType =')"
+        vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+        vmdk_adapter_type="${vmdk_adapter_type%?}"
+
+        # vmdk disk type
+        vmdk_create_type="$(head -25 $IMAGE | grep -a -F -m 1 'createType=')"
+        vmdk_create_type="${vmdk_create_type#*\"}"
+        vmdk_create_type="${vmdk_create_type%?}"
+
+        descriptor_data_pair_msg="Monolithic flat and VMFS disks "`
+                                 `"should use a descriptor-data pair."
+        if [[ "$vmdk_create_type" = "monolithicSparse" ]]; then
+            vmdk_disktype="sparse"
+        elif [[ "$vmdk_create_type" = "monolithicFlat" || \
+        "$vmdk_create_type" = "vmfs" ]]; then
+            # Attempt to retrieve the *-flat.vmdk
+            flat_fname="$(head -25 $IMAGE | grep -E 'RW|RDONLY [0-9]+ FLAT|VMFS')"
+            flat_fname="${flat_fname#*\"}"
+            flat_fname="${flat_fname%?}"
+            if [[ -z "$flat_fname" ]]; then
+                flat_fname="$IMAGE_NAME-flat.vmdk"
+            fi
+            path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+            flat_url="${image_url:0:$path_len}$flat_fname"
+            warn $LINENO "$descriptor_data_pair_msg"`
+                         `" Attempt to retrieve the *-flat.vmdk: $flat_url"
+            if [[ $flat_url != file* ]]; then
+                if [[ ! -f $FILES/$flat_fname || \
+                "$(stat -c "%s" $FILES/$flat_fname)" = "0" ]]; then
+                    wget -c $flat_url -O $FILES/$flat_fname
+                    if [[ $? -ne 0 ]]; then
+                        echo "Flat disk not found: $flat_url"
+                        flat_found=false
+                    fi
+                fi
+                if $flat_found; then
+                    IMAGE="$FILES/${flat_fname}"
+                fi
+            else
+                IMAGE=$(echo $flat_url | sed "s/^file:\/\///g")
+                if [[ ! -f $IMAGE || "$(stat -c "%s" $IMAGE)" == "0" ]]; then
+                    echo "Flat disk not found: $flat_url"
+                    flat_found=false
+                fi
+                if ! $flat_found; then
+                    IMAGE=$(echo $image_url | sed "s/^file:\/\///g")
+                fi
+            fi
+            if $flat_found; then
+                IMAGE_NAME="${flat_fname}"
+            fi
+            vmdk_disktype="preallocated"
+        elif [[ -z "$vmdk_create_type" ]]; then
+            # *-flat.vmdk provided: attempt to retrieve the descriptor (*.vmdk)
+            # to retrieve appropriate metadata
+            if [[ ${IMAGE_NAME: -5} != "-flat" ]]; then
+                warn $LINENO "Expected filename suffix: '-flat'."`
+                            `" Filename provided: ${IMAGE_NAME}"
+            else
+                descriptor_fname="${IMAGE_NAME:0:${#IMAGE_NAME} - 5}.vmdk"
+                path_len=`expr ${#image_url} - ${#IMAGE_FNAME}`
+                flat_path="${image_url:0:$path_len}"
+                descriptor_url=$flat_path$descriptor_fname
+                warn $LINENO "$descriptor_data_pair_msg"`
+                             `" Attempt to retrieve the descriptor *.vmdk: $descriptor_url"
+                if [[ $flat_path != file* ]]; then
+                    if [[ ! -f $FILES/$descriptor_fname || \
+                    "$(stat -c "%s" $FILES/$descriptor_fname)" = "0" ]]; then
+                        wget -c $descriptor_url -O $FILES/$descriptor_fname
+                        if [[ $? -ne 0 ]]; then
+                            warn $LINENO "Descriptor not found $descriptor_url"
+                            descriptor_found=false
+                        fi
+                    fi
+                    descriptor_url="$FILES/$descriptor_fname"
+                else
+                    descriptor_url=$(echo $descriptor_url | sed "s/^file:\/\///g")
+                    if [[ ! -f $descriptor_url || \
+                    "$(stat -c "%s" $descriptor_url)" == "0" ]]; then
+                         warn $LINENO "Descriptor not found $descriptor_url"
+                         descriptor_found=false
+                    fi
+                fi
+                if $descriptor_found; then
+                    vmdk_adapter_type="$(head -25 $descriptor_url |
+                        grep -a -F -m 1 'ddb.adapterType =')"
+                    vmdk_adapter_type="${vmdk_adapter_type#*\"}"
+                    vmdk_adapter_type="${vmdk_adapter_type%?}"
+                fi
+            fi
+            #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
+            vmdk_disktype="preallocated"
+        else
+            #TODO(alegendre): handle streamOptimized once supported by the VMware driver.
+            vmdk_disktype="preallocated"
         fi
 
+        # NOTE: For backwards compatibility reasons, colons may be used in place
+        # of semi-colons for property delimiters but they are not permitted
+        # characters in NTFS filesystems.
+        property_string=`echo "$IMAGE_NAME" | grep -oP '(?<=-)(?!.*-).+[:;].+[:;].+$'`
+        IFS=':;' read -a props <<< "$property_string"
+        vmdk_disktype="${props[0]:-$vmdk_disktype}"
+        vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}"
+        vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}"
+
         glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${IMAGE}"
         return
     fi
@@ -1360,7 +1514,6 @@
     # XenServer-vhd-ovf-format images are provided as .vhd.tgz
     # and should not be decompressed prior to loading
     if [[ "$image_url" =~ '.vhd.tgz' ]]; then
-        IMAGE="$FILES/${IMAGE_FNAME}"
         IMAGE_NAME="${IMAGE_FNAME%.vhd.tgz}"
         glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format=ovf --disk-format=vhd < "${IMAGE}"
         return
@@ -1370,7 +1523,6 @@
     # and should not be decompressed prior to loading.
     # Setting metadata, so PV mode is used.
     if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then
-        IMAGE="$FILES/${IMAGE_FNAME}"
         IMAGE_NAME="${IMAGE_FNAME%.xen-raw.tgz}"
         glance \
             --os-auth-token $token \
@@ -1408,7 +1560,6 @@
             fi
             ;;
         *.img)
-            IMAGE="$FILES/$IMAGE_FNAME";
             IMAGE_NAME=$(basename "$IMAGE" ".img")
             format=$(qemu-img info ${IMAGE} | awk '/^file format/ { print $3; exit }')
             if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
@@ -1419,20 +1570,17 @@
             CONTAINER_FORMAT=bare
             ;;
         *.img.gz)
-            IMAGE="$FILES/${IMAGE_FNAME}"
             IMAGE_NAME=$(basename "$IMAGE" ".img.gz")
             DISK_FORMAT=raw
             CONTAINER_FORMAT=bare
             UNPACK=zcat
             ;;
         *.qcow2)
-            IMAGE="$FILES/${IMAGE_FNAME}"
             IMAGE_NAME=$(basename "$IMAGE" ".qcow2")
             DISK_FORMAT=qcow2
             CONTAINER_FORMAT=bare
             ;;
         *.iso)
-            IMAGE="$FILES/${IMAGE_FNAME}"
             IMAGE_NAME=$(basename "$IMAGE" ".iso")
             DISK_FORMAT=iso
             CONTAINER_FORMAT=bare
@@ -1466,7 +1614,8 @@
 # When called from stackrc/localrc DATABASE_BACKENDS has not been
 # initialized yet, just save the configuration selection and call back later
 # to validate it.
-#  $1 The name of the database backend to use (mysql, postgresql, ...)
+#
+# ``$1`` - the name of the database backend to use (mysql, postgresql, ...)
 function use_database {
     if [[ -z "$DATABASE_BACKENDS" ]]; then
         # No backends registered means this is likely called from ``localrc``
@@ -1507,7 +1656,7 @@
 
 
 # Wrapper for ``yum`` to set proxy environment variables
-# Uses globals ``OFFLINE``, ``*_proxy`
+# Uses globals ``OFFLINE``, ``*_proxy``
 # yum_install package [package ...]
 function yum_install() {
     [[ "$OFFLINE" = "True" ]] && return
diff --git a/lib/apache b/lib/apache
index 3a1f6f1..8ae78b2 100644
--- a/lib/apache
+++ b/lib/apache
@@ -2,15 +2,20 @@
 # Functions to control configuration and operation of apache web server
 
 # Dependencies:
-# ``functions`` file
-# is_apache_enabled_service
-# install_apache_wsgi
-# config_apache_wsgi
-# enable_apache_site
-# disable_apache_site
-# start_apache_server
-# stop_apache_server
-# restart_apache_server
+#
+# - ``functions`` file
+# - ``STACK_USER`` must be defined
+
+# lib/apache exports the following functions:
+#
+# - is_apache_enabled_service
+# - install_apache_wsgi
+# - config_apache_wsgi
+# - enable_apache_site
+# - disable_apache_site
+# - start_apache_server
+# - stop_apache_server
+# - restart_apache_server
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -18,7 +23,7 @@
 
 # Allow overriding the default Apache user and group, default to
 # current user and his default group.
-APACHE_USER=${APACHE_USER:-$USER}
+APACHE_USER=${APACHE_USER:-$STACK_USER}
 APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
 
 
@@ -116,6 +121,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/baremetal b/lib/baremetal
index 141c28d..a0df85e 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -1,19 +1,19 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
+## vim: tabstop=4 shiftwidth=4 softtabstop=4
 
-# Copyright (c) 2012 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
+## Copyright (c) 2012 Hewlett-Packard Development Company, L.P.
+## All Rights Reserved.
+##
+##    Licensed under the Apache License, Version 2.0 (the "License"); you may
+##    not use this file except in compliance with the License. You may obtain
+##    a copy of the License at
+##
+##         http://www.apache.org/licenses/LICENSE-2.0
+##
+##    Unless required by applicable law or agreed to in writing, software
+##    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+##    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+##    License for the specific language governing permissions and limitations
+##    under the License.
 
 
 # This file provides devstack with the environment and utilities to
@@ -24,7 +24,8 @@
 # control physical hardware resources on the same network, if you know
 # the MAC address(es) and IPMI credentials.
 #
-# At a minimum, to enable the baremetal driver, you must set these in loclarc:
+# At a minimum, to enable the baremetal driver, you must set these in localrc:
+#
 #    VIRT_DRIVER=baremetal
 #    ENABLED_SERVICES="$ENABLED_SERVICES,baremetal"
 #
@@ -38,11 +39,13 @@
 # Below that, various functions are defined, which are called by devstack
 # in the following order:
 #
-#  before nova-cpu starts:
+# before nova-cpu starts:
+#
 #  - prepare_baremetal_toolchain
 #  - configure_baremetal_nova_dirs
 #
-#  after nova and glance have started:
+# after nova and glance have started:
+#
 #  - build_and_upload_baremetal_deploy_k_and_r $token
 #  - create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
 #  - upload_baremetal_image $url $token
@@ -58,11 +61,13 @@
 # -------------------
 
 # sub-driver to use for kernel deployment
-#  - nova.virt.baremetal.pxe.PXE
-#  - nova.virt.baremetal.tilera.TILERA
+#
+# - nova.virt.baremetal.pxe.PXE
+# - nova.virt.baremetal.tilera.TILERA
 BM_DRIVER=${BM_DRIVER:-nova.virt.baremetal.pxe.PXE}
 
 # sub-driver to use for remote power management
+#
 # - nova.virt.baremetal.fake.FakePowerManager, for manual power control
 # - nova.virt.baremetal.ipmi.IPMI, for remote IPMI
 # - nova.virt.baremetal.tilera_pdu.Pdu, for TilePro hardware
@@ -83,10 +88,12 @@
 # To provide PXE, configure nova-network's dnsmasq rather than run the one
 # dedicated to baremetal. When enable this, make sure these conditions are
 # fulfilled:
-#  1) nova-compute and nova-network runs on the same host
-#  2) nova-network uses FlatDHCPManager
+#
+# 1) nova-compute and nova-network runs on the same host
+# 2) nova-network uses FlatDHCPManager
+#
 # NOTE: the other BM_DNSMASQ_* have no effect on the behavior if this option
-#       is enabled.
+# is enabled.
 BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK`
 
 # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
@@ -103,9 +110,9 @@
 # BM_DNSMASQ_DNS provide dns server to bootstrap clients
 BM_DNSMASQ_DNS=${BM_DNSMASQ_DNS:-}
 
-# BM_FIRST_MAC *must* be set to the MAC address of the node you will boot.
-#              This is passed to dnsmasq along with the kernel/ramdisk to
-#              deploy via PXE.
+# BM_FIRST_MAC *must* be set to the MAC address of the node you will
+# boot.  This is passed to dnsmasq along with the kernel/ramdisk to
+# deploy via PXE.
 BM_FIRST_MAC=${BM_FIRST_MAC:-}
 
 # BM_SECOND_MAC is only important if the host has >1 NIC.
@@ -119,9 +126,9 @@
 BM_PM_USER=${BM_PM_USER:-user}
 BM_PM_PASS=${BM_PM_PASS:-pass}
 
-# BM_FLAVOR_* options are arbitrary and not necessarily related to physical
-#             hardware capacity. These can be changed if you are testing
-#             BaremetalHostManager with multiple nodes and different flavors.
+# BM_FLAVOR_* options are arbitrary and not necessarily related to
+# physical hardware capacity. These can be changed if you are testing
+# BaremetalHostManager with multiple nodes and different flavors.
 BM_CPU_ARCH=${BM_CPU_ARCH:-x86_64}
 BM_FLAVOR_CPU=${BM_FLAVOR_CPU:-1}
 BM_FLAVOR_RAM=${BM_FLAVOR_RAM:-1024}
@@ -198,8 +205,8 @@
     BM_FIRST_MAC=$(sudo $bm_poseur get-macs)
 
     # NOTE: there is currently a limitation in baremetal driver
-    #       that requires second MAC even if it is not used.
-    #       Passing a fake value allows this to work.
+    # that requires second MAC even if it is not used.
+    # Passing a fake value allows this to work.
     # TODO(deva): remove this after driver issue is fixed.
     BM_SECOND_MAC='12:34:56:78:90:12'
 }
@@ -286,8 +293,8 @@
 
 }
 
-# pull run-time kernel/ramdisk out of disk image and load into glance
-# note that $file is currently expected to be in qcow2 format
+# Pull run-time kernel/ramdisk out of disk image and load into glance.
+# Note that $file is currently expected to be in qcow2 format.
 # Sets KERNEL_ID and RAMDISK_ID
 #
 # Usage: extract_and_upload_k_and_r_from_image $token $file
@@ -430,7 +437,7 @@
     done
 }
 
-# inform nova-baremetal about nodes, MACs, etc
+# Inform nova-baremetal about nodes, MACs, etc.
 # Defaults to using BM_FIRST_MAC and BM_SECOND_MAC if parameters not specified
 #
 # Usage: add_baremetal_node <first_mac> <second_mac>
@@ -459,6 +466,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/ceilometer b/lib/ceilometer
index cd4c4d8..8e2970c 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -2,12 +2,15 @@
 # Install and start **Ceilometer** service
 
 # To enable a minimal set of Ceilometer services, add the following to localrc:
+#
 #   enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
 #
 # To ensure Ceilometer alarming services are enabled also, further add to the localrc:
+#
 #   enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator
 
 # Dependencies:
+#
 # - functions
 # - OS_AUTH_URL for auth in api
 # - DEST set to the destination directory
@@ -16,12 +19,12 @@
 
 # stack.sh
 # ---------
-# install_ceilometer
-# configure_ceilometer
-# init_ceilometer
-# start_ceilometer
-# stop_ceilometer
-# cleanup_ceilometer
+# - install_ceilometer
+# - configure_ceilometer
+# - init_ceilometer
+# - start_ceilometer
+# - stop_ceilometer
+# - cleanup_ceilometer
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -64,10 +67,10 @@
     setup_develop $CEILOMETER_DIR
 
     [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR
-    sudo chown $USER $CEILOMETER_CONF_DIR
+    sudo chown $STACK_USER $CEILOMETER_CONF_DIR
 
     [ ! -d $CEILOMETER_API_LOG_DIR ] &&  sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR
-    sudo chown $USER $CEILOMETER_API_LOG_DIR
+    sudo chown $STACK_USER $CEILOMETER_API_LOG_DIR
 
     iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT
 
@@ -79,6 +82,10 @@
     cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR
     iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json
 
+    if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then
+        sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml
+    fi
+
     # the compute and central agents need these credentials in order to
     # call out to the public nova and glance APIs
     iniset $CEILOMETER_CONF DEFAULT os_username ceilometer
@@ -91,7 +98,7 @@
     iniset $CEILOMETER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $CEILOMETER_CONF keystone_authtoken signing_dir $CEILOMETER_AUTH_CACHE_DIR
 
-    if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then
+    if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
         iniset $CEILOMETER_CONF database connection `database_connection_url ceilometer`
     else
         iniset $CEILOMETER_CONF database connection mongodb://localhost:27017/ceilometer
@@ -116,7 +123,7 @@
     sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR
     rm -f $CEILOMETER_AUTH_CACHE_DIR/*
 
-    if [[ "$CEILOMETER_BACKEND" = 'mysql' ]]; then
+    if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then
         recreate_database ceilometer utf8
         $CEILOMETER_BIN_DIR/ceilometer-dbsync
     fi
@@ -134,10 +141,18 @@
 
 # start_ceilometer() - Start running processes, including screen
 function start_ceilometer() {
-    screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
+    if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+        screen_it ceilometer-acompute "cd ; sg $LIBVIRT_GROUP \"ceilometer-agent-compute --config-file $CEILOMETER_CONF\""
+    fi
     screen_it ceilometer-acentral "cd ; ceilometer-agent-central --config-file $CEILOMETER_CONF"
     screen_it ceilometer-collector "cd ; ceilometer-collector --config-file $CEILOMETER_CONF"
     screen_it ceilometer-api "cd ; ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+
+    echo "Waiting for ceilometer-api to start..."
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://localhost:8777/v2/ >/dev/null; do sleep 1; done"; then
+        die $LINENO "ceilometer-api did not start"
+    fi
+
     screen_it ceilometer-alarm-notifier "cd ; ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
     screen_it ceilometer-alarm-evaluator "cd ; ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF"
 }
@@ -154,6 +169,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/cinder b/lib/cinder
index f6f137c..9288685 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -2,19 +2,20 @@
 # Install and start **Cinder** volume service
 
 # Dependencies:
+#
 # - functions
 # - DEST, DATA_DIR, STACK_USER must be defined
-# SERVICE_{TENANT_NAME|PASSWORD} must be defined
-# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+# - SERVICE_{TENANT_NAME|PASSWORD} must be defined
+# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
 
 # stack.sh
 # ---------
-# install_cinder
-# configure_cinder
-# init_cinder
-# start_cinder
-# stop_cinder
-# cleanup_cinder
+# - install_cinder
+# - configure_cinder
+# - init_cinder
+# - start_cinder
+# - stop_cinder
+# - cleanup_cinder
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -82,7 +83,8 @@
 # Functions
 # ---------
 # _clean_lvm_lv removes all cinder LVM volumes
-# _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX
+#
+# Usage: _clean_lvm_lv $VOLUME_GROUP $VOLUME_NAME_PREFIX
 function _clean_lvm_lv() {
     local vg=$1
     local lv_prefix=$2
@@ -98,7 +100,8 @@
 
 # _clean_lvm_backing_file() removes the backing file of the
 # volume group used by cinder
-# _clean_lvm_backing_file() $VOLUME_GROUP
+#
+# Usage: _clean_lvm_backing_file $VOLUME_GROUP
 function _clean_lvm_backing_file() {
     local vg=$1
 
@@ -196,7 +199,7 @@
     fi
 
     TEMPFILE=`mktemp`
-    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CINDER_SUDOER_CMD" >$TEMPFILE
     chmod 0440 $TEMPFILE
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/cinder-rootwrap
@@ -206,6 +209,7 @@
     inicomment $CINDER_API_PASTE_INI filter:authtoken auth_host
     inicomment $CINDER_API_PASTE_INI filter:authtoken auth_port
     inicomment $CINDER_API_PASTE_INI filter:authtoken auth_protocol
+    inicomment $CINDER_API_PASTE_INI filter:authtoken cafile
     inicomment $CINDER_API_PASTE_INI filter:authtoken admin_tenant_name
     inicomment $CINDER_API_PASTE_INI filter:authtoken admin_user
     inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
@@ -216,6 +220,7 @@
     iniset $CINDER_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
     iniset $CINDER_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
     iniset $CINDER_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $CINDER_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
     iniset $CINDER_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $CINDER_CONF keystone_authtoken admin_user cinder
     iniset $CINDER_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -281,6 +286,11 @@
             iniset $CINDER_CONF DEFAULT xenapi_nfs_server "$CINDER_XENAPI_NFS_SERVER"
             iniset $CINDER_CONF DEFAULT xenapi_nfs_serverpath "$CINDER_XENAPI_NFS_SERVERPATH"
         )
+    elif [ "$CINDER_DRIVER" == "nfs" ]; then
+        iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.nfs.NfsDriver"
+        iniset $CINDER_CONF DEFAULT nfs_shares_config "$CINDER_CONF_DIR/nfs_shares.conf"
+        echo "$CINDER_NFS_SERVERPATH" | sudo tee "$CINDER_CONF_DIR/nfs_shares.conf"
+        sudo chmod 666 $CINDER_CONF_DIR/nfs_shares.conf
     elif [ "$CINDER_DRIVER" == "sheepdog" ]; then
         iniset $CINDER_CONF DEFAULT volume_driver "cinder.volume.drivers.sheepdog.SheepdogDriver"
     elif [ "$CINDER_DRIVER" == "glusterfs" ]; then
@@ -546,6 +556,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/database b/lib/database
index 3c15609..0661049 100644
--- a/lib/database
+++ b/lib/database
@@ -9,10 +9,11 @@
 
 # This is a wrapper for the specific database backends available.
 # Each database must implement four functions:
-#   recreate_database_$DATABASE_TYPE
-#   install_database_$DATABASE_TYPE
-#   configure_database_$DATABASE_TYPE
-#   database_connection_url_$DATABASE_TYPE
+#
+# - recreate_database_$DATABASE_TYPE
+# - install_database_$DATABASE_TYPE
+# - configure_database_$DATABASE_TYPE
+# - database_connection_url_$DATABASE_TYPE
 #
 # and call register_database $DATABASE_TYPE
 
@@ -22,7 +23,9 @@
 
 
 # Register a database backend
-#  $1 The name of the database backend
+#
+#   $1 The name of the database backend
+#
 # This is required to be defined before the specific database scripts are sourced
 function register_database {
     [ -z "$DATABASE_BACKENDS" ] && DATABASE_BACKENDS=$1 || DATABASE_BACKENDS+=" $1"
@@ -121,6 +124,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 41e3236..0eb8fdd 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -2,7 +2,8 @@
 # Functions to control the configuration and operation of the **MySQL** database backend
 
 # Dependencies:
-# DATABASE_{HOST,USER,PASSWORD} must be defined
+#
+# - DATABASE_{HOST,USER,PASSWORD} must be defined
 
 # Save trace setting
 MY_XTRACE=$(set +o | grep xtrace)
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index b173772..519479a 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -2,7 +2,8 @@
 # Functions to control the configuration and operation of the **PostgreSQL** database backend
 
 # Dependencies:
-# DATABASE_{HOST,USER,PASSWORD} must be defined
+#
+# - DATABASE_{HOST,USER,PASSWORD} must be defined
 
 # Save trace setting
 PG_XTRACE=$(set +o | grep xtrace)
diff --git a/lib/glance b/lib/glance
index 75e3dd0..2e29a8f 100644
--- a/lib/glance
+++ b/lib/glance
@@ -2,20 +2,21 @@
 # Functions to control the configuration and operation of the **Glance** service
 
 # Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# ``SERVICE_HOST``
-# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# - ``SERVICE_HOST``
+# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_glance
-# configure_glance
-# init_glance
-# start_glance
-# stop_glance
-# cleanup_glance
+# - install_glance
+# - configure_glance
+# - init_glance
+# - start_glance
+# - stop_glance
+# - cleanup_glance
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -81,6 +82,7 @@
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
@@ -98,6 +100,7 @@
     iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
     iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
     iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $GLANCE_API_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
     iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
     iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
@@ -171,7 +174,7 @@
     recreate_database glance utf8
 
     # Migrate glance database
-    $GLANCE_BIN_DIR/glance-manage db_sync
+    $GLANCE_BIN_DIR/glance-manage db sync
 
     create_glance_cache_dir
 }
@@ -209,6 +212,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/heat b/lib/heat
index 8acadb4..e44a618 100644
--- a/lib/heat
+++ b/lib/heat
@@ -2,21 +2,23 @@
 # Install and start **Heat** service
 
 # To enable, add the following to localrc
-# ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
+#
+#   ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
 
 # Dependencies:
+#
 # - functions
 
 # stack.sh
 # ---------
-# install_heatclient
-# install_heat
-# configure_heatclient
-# configure_heat
-# init_heat
-# start_heat
-# stop_heat
-# cleanup_heat
+# - install_heatclient
+# - install_heat
+# - configure_heatclient
+# - configure_heat
+# - init_heat
+# - start_heat
+# - stop_heat
+# - cleanup_heat
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -78,7 +80,7 @@
     iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
     iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
     iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
-    iniset $HEAT_CONF DEFAULT sql_connection `database_connection_url heat`
+    iniset $HEAT_CONF database connection `database_connection_url heat`
     iniset $HEAT_CONF DEFAULT auth_encryption_key `hexdump -n 16 -v -e '/1 "%02x"' /dev/random`
 
     # logging
@@ -94,6 +96,7 @@
     iniset $HEAT_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
     iniset $HEAT_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
     iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0
+    iniset $HEAT_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
     iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $HEAT_CONF keystone_authtoken admin_user heat
     iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
@@ -118,9 +121,6 @@
     iniset $HEAT_CONF heat_api_cloudwatch bind_host $HEAT_API_CW_HOST
     iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
 
-    # Set limits to match tempest defaults
-    iniset $HEAT_CONF DEFAULT max_template_size 10240
-
     # heat environment
     sudo mkdir -p $HEAT_ENV_DIR
     sudo chown $STACK_USER $HEAT_ENV_DIR
@@ -198,6 +198,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/horizon b/lib/horizon
index 63caf3c..5bff712 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -1,21 +1,20 @@
 # lib/horizon
 # Functions to control the configuration and operation of the horizon service
-# <do not include this template file in ``stack.sh``!>
 
 # Dependencies:
-# ``functions`` file
-# ``apache`` file
-# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# <list other global vars that are assumed to be defined>
+#
+# - ``functions`` file
+# - ``apache`` file
+# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_horizon
-# configure_horizon
-# init_horizon
-# start_horizon
-# stop_horizon
-# cleanup_horizon
+# - install_horizon
+# - configure_horizon
+# - init_horizon
+# - start_horizon
+# - stop_horizon
+# - cleanup_horizon
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -25,8 +24,6 @@
 # Defaults
 # --------
 
-# <define global variables here that belong to this project>
-
 # Set up default directories
 HORIZON_DIR=$DEST/horizon
 
@@ -115,7 +112,12 @@
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
+    # Apache 2.4 uses mod_authz_host for access control now (instead of "Allow")
     HORIZON_REQUIRE=''
+    if check_apache_version "2.4" ; then
+        HORIZON_REQUIRE='Require all granted'
+    fi
+
     local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon.conf
     if is_ubuntu; then
         # Clean up the old config name
@@ -124,11 +126,6 @@
         sudo touch $horizon_conf
         sudo a2ensite horizon.conf
     elif is_fedora; then
-        if [[ "$os_RELEASE" -ge "18" ]]; then
-            # fedora 18 has Require all denied  in its httpd.conf
-            # and requires explicit Require all granted
-            HORIZON_REQUIRE='Require all granted'
-        fi
         sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
     elif is_suse; then
         : # nothing to do
@@ -156,15 +153,6 @@
     # Apache installation, because we mark it NOPRIME
     install_apache_wsgi
 
-    # NOTE(sdague) quantal changed the name of the node binary
-    if is_ubuntu; then
-        if [[ ! -e "/usr/bin/node" ]]; then
-            install_package nodejs-legacy
-        fi
-    elif is_fedora && [[ $DISTRO =~ (rhel6) || "$os_RELEASE" -ge "18" ]]; then
-        install_package nodejs
-    fi
-
     git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH $HORIZON_TAG
 }
 
@@ -183,6 +171,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/infra b/lib/infra
index 0b73259..0dcf0ad 100644
--- a/lib/infra
+++ b/lib/infra
@@ -5,12 +5,13 @@
 # requirements as a global list
 
 # Dependencies:
-# ``functions`` file
+#
+# - ``functions`` file
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# unfubar_setuptools
-# install_infra
+# - unfubar_setuptools
+# - install_infra
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -51,6 +52,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/ironic b/lib/ironic
index 649c1c2..099746a 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -2,21 +2,22 @@
 # Functions to control the configuration and operation of the **Ironic** service
 
 # Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# ``SERVICE_HOST``
-# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# - ``SERVICE_HOST``
+# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_ironic
-# install_ironicclient
-# configure_ironic
-# init_ironic
-# start_ironic
-# stop_ironic
-# cleanup_ironic
+# - install_ironic
+# - install_ironicclient
+# - configure_ironic
+# - init_ironic
+# - start_ironic
+# - stop_ironic
+# - cleanup_ironic
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -93,9 +93,12 @@
 # configure_ironic_api() - Is used by configure_ironic(). Performs
 # API specific configuration.
 function configure_ironic_api() {
+    iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
+    iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON
     iniset $IRONIC_CONF_FILE keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
     iniset $IRONIC_CONF_FILE keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
     iniset $IRONIC_CONF_FILE keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $IRONIC_CONF_FILE keystone_authtoken cafile $KEYSTONE_SSL_CA
     iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
     iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
@@ -225,6 +228,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/keystone b/lib/keystone
old mode 100755
new mode 100644
index beddb1c..6d0c1cd
--- a/lib/keystone
+++ b/lib/keystone
@@ -2,25 +2,27 @@
 # Functions to control the configuration and operation of **Keystone**
 
 # Dependencies:
-# ``functions`` file
-# ``DEST``, ``STACK_USER``
-# ``IDENTITY_API_VERSION``
-# ``BASE_SQL_CONN``
-# ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
-# ``SERVICE_TOKEN``
-# ``S3_SERVICE_PORT`` (template backend only)
+#
+# - ``functions`` file
+# - ``tls`` file
+# - ``DEST``, ``STACK_USER``
+# - ``IDENTITY_API_VERSION``
+# - ``BASE_SQL_CONN``
+# - ``SERVICE_HOST``, ``SERVICE_PROTOCOL``
+# - ``SERVICE_TOKEN``
+# - ``S3_SERVICE_PORT`` (template backend only)
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_keystone
-# configure_keystone
-# _config_keystone_apache_wsgi
-# init_keystone
-# start_keystone
-# create_keystone_accounts
-# stop_keystone
-# cleanup_keystone
-# _cleanup_keystone_apache_wsgi
+# - install_keystone
+# - configure_keystone
+# - _config_keystone_apache_wsgi
+# - init_keystone
+# - start_keystone
+# - create_keystone_accounts
+# - stop_keystone
+# - cleanup_keystone
+# - _cleanup_keystone_apache_wsgi
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -78,6 +80,13 @@
 # valid assignment backends as per dir keystone/identity/backends
 KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql
 
+# if we are running with SSL use https protocols
+if is_ssl_enabled_service "key"; then
+    KEYSTONE_AUTH_PROTOCOL="https"
+    KEYSTONE_SERVICE_PROTOCOL="https"
+fi
+
+
 # Functions
 # ---------
 # cleanup_keystone() - Remove residual data files, anything left over from previous
@@ -125,6 +134,7 @@
 
     if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then
         cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF
+        chmod 600 $KEYSTONE_CONF
         cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
         if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then
             cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI"
@@ -170,6 +180,15 @@
     iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/"
     iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/"
 
+    # Register SSL certificates if provided
+    if is_ssl_enabled_service key; then
+        ensure_certificates KEYSTONE
+
+        iniset $KEYSTONE_CONF ssl enable True
+        iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT
+        iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY
+    fi
+
     if is_service_enabled tls-proxy; then
         # Set the service ports for a proxy to take the originals
         iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT
@@ -177,7 +196,6 @@
     fi
 
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
-    iniset $KEYSTONE_CONF signing token_format "$KEYSTONE_TOKEN_FORMAT"
 
     if [[ "$KEYSTONE_TOKEN_FORMAT" = "UUID" ]]; then
         iniset $KEYSTONE_CONF token provider keystone.token.providers.uuid.Provider
@@ -188,6 +206,8 @@
 
     if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then
         iniset $KEYSTONE_CONF token driver keystone.token.backends.sql.Token
+    elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
+        iniset $KEYSTONE_CONF token driver keystone.token.backends.memcache.Token
     else
         iniset $KEYSTONE_CONF token driver keystone.token.backends.kvs.Token
     fi
@@ -348,6 +368,17 @@
     if is_service_enabled ldap; then
         install_ldap
     fi
+    if [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then
+        # Install memcached and the memcache Python library that keystone uses.
+        # Unfortunately the Python library goes by different names in the .deb
+        # and .rpm circles.
+        install_package memcached
+        if is_ubuntu; then
+            install_package python-memcache
+        else
+            install_package python-memcached
+        fi
+    fi
     git_clone $KEYSTONE_REPO $KEYSTONE_DIR $KEYSTONE_BRANCH
     setup_develop $KEYSTONE_DIR
     if is_apache_enabled_service key; then
@@ -372,7 +403,7 @@
     fi
 
     echo "Waiting for keystone to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s http://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
+    if ! timeout $SERVICE_TIMEOUT sh -c "while ! curl --noproxy '*' -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$service_port/v$IDENTITY_API_VERSION/ >/dev/null; do sleep 1; done"; then
         die $LINENO "keystone did not start"
     fi
 
@@ -393,6 +424,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/ldap b/lib/ldap
index 2a24ccd..80992a7 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -2,7 +2,8 @@
 # Functions to control the installation and configuration of **ldap**
 
 # ``lib/keystone`` calls the entry points in this order:
-# install_ldap()
+#
+# - install_ldap()
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -91,6 +92,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/neutron b/lib/neutron
index 9227f19..7376772 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -4,27 +4,29 @@
 # Dependencies:
 # ``functions`` file
 # ``DEST`` must be defined
+# ``STACK_USER`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_neutron
-# install_neutronclient
-# install_neutron_agent_packages
-# install_neutron_third_party
-# configure_neutron
-# init_neutron
-# configure_neutron_third_party
-# init_neutron_third_party
-# start_neutron_third_party
-# create_nova_conf_neutron
-# start_neutron_service_and_check
-# create_neutron_initial_network
-# setup_neutron_debug
-# start_neutron_agents
+# - install_neutron
+# - install_neutronclient
+# - install_neutron_agent_packages
+# - install_neutron_third_party
+# - configure_neutron
+# - init_neutron
+# - configure_neutron_third_party
+# - init_neutron_third_party
+# - start_neutron_third_party
+# - create_neutron_cache_dir
+# - create_nova_conf_neutron
+# - start_neutron_service_and_check
+# - create_neutron_initial_network
+# - setup_neutron_debug
+# - start_neutron_agents
 #
 # ``unstack.sh`` calls the entry points in this order:
 #
-# stop_neutron
+# - stop_neutron
 
 # Functions in lib/neutron are classified into the following categories:
 #
@@ -110,6 +112,10 @@
 Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False}
 # The name of the default q-l3 router
 Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
+# nova vif driver that all plugins should use
+NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+
+
 # List of config file names in addition to the main plugin config file
 # See _configure_neutron_common() for details about setting it up
 declare -a Q_PLUGIN_EXTRA_CONF_FILES
@@ -202,13 +208,19 @@
 # Hardcoding for 1 service plugin for now
 source $TOP_DIR/lib/neutron_plugins/services/loadbalancer
 
+# Agent metering service plugin functions
+# -------------------------------------------
+
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/neutron_plugins/services/metering
+
 # VPN service plugin functions
 # -------------------------------------------
 # Hardcoding for 1 service plugin for now
 source $TOP_DIR/lib/neutron_plugins/services/vpn
 
 # Firewall Service Plugin functions
-# --------------------------------
+# ---------------------------------
 source $TOP_DIR/lib/neutron_plugins/services/firewall
 
 # Use security group or not
@@ -231,6 +243,9 @@
     if is_service_enabled q-lbaas; then
         _configure_neutron_lbaas
     fi
+    if is_service_enabled q-metering; then
+        _configure_neutron_metering
+    fi
     if is_service_enabled q-vpn; then
         _configure_neutron_vpn
     fi
@@ -268,6 +283,7 @@
 
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
+        iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
         iniset $NOVA_CONF DEFAULT security_group_api neutron
     fi
 
@@ -281,6 +297,14 @@
     fi
 }
 
+# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process
+function create_neutron_cache_dir() {
+    # Create cache dir
+    sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR
+    sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR
+    rm -f $NEUTRON_AUTH_CACHE_DIR/*
+}
+
 # create_neutron_accounts() - Set up common required neutron accounts
 
 # Tenant               User       Roles
@@ -461,6 +485,10 @@
     if is_service_enabled q-lbaas; then
         screen_it q-lbaas "cd $NEUTRON_DIR && python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME"
     fi
+
+    if is_service_enabled q-metering; then
+        screen_it q-metering "cd $NEUTRON_DIR && python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
+    fi
 }
 
 # stop_neutron() - Stop running processes (non-screen)
@@ -504,6 +532,7 @@
     # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
     # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``,
     # ``Q_PLUGIN_EXTRA_CONF_FILES``.  For example:
+    #
     #    ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)``
     neutron_plugin_configure_common
 
@@ -546,7 +575,7 @@
 
     # Format logging
     if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $NEUTRON_CONF DEFAULT
+        setup_colorized_logging $NEUTRON_CONF DEFAULT project_id
     fi
 
     _neutron_setup_rootwrap
@@ -640,6 +669,11 @@
     neutron_agent_lbaas_configure_agent
 }
 
+function _configure_neutron_metering() {
+    neutron_agent_metering_configure_common
+    neutron_agent_metering_configure_agent
+}
+
 function _configure_neutron_fwaas() {
     neutron_fwaas_configure_common
     neutron_fwaas_configure_driver
@@ -734,7 +768,7 @@
 
     # Set up the rootwrap sudoers for neutron
     TEMPFILE=`mktemp`
-    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
     chmod 0440 $TEMPFILE
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
@@ -767,9 +801,7 @@
     if [[ -z $skip_auth_cache ]]; then
         iniset $conf_file $section signing_dir $NEUTRON_AUTH_CACHE_DIR
         # Create cache dir
-        sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR
-        sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR
-        rm -f $NEUTRON_AUTH_CACHE_DIR/*
+        create_neutron_cache_dir
     fi
 }
 
@@ -900,6 +932,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 2450731..93ec497 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -9,7 +9,7 @@
 source $TOP_DIR/lib/neutron_thirdparty/bigswitch_floodlight     # for third party service specific configuration values
 
 function neutron_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    :
 }
 
 function neutron_plugin_install_agent_packages() {
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 88c49c5..85e8c08 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -11,7 +11,7 @@
 }
 
 function neutron_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    :
 }
 
 function neutron_plugin_install_agent_packages() {
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
index cf45a9d..e406146 100644
--- a/lib/neutron_plugins/midonet
+++ b/lib/neutron_plugins/midonet
@@ -32,23 +32,10 @@
 
 function neutron_plugin_configure_dhcp_agent() {
     DHCP_DRIVER=${DHCP_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.DhcpNoOpDriver"}
-    DHCP_INTERFACE_DRIVER=${DHCP_INTEFACE_DRIVER:-"neutron.plugins.midonet.agent.midonet_driver.MidonetInterfaceDriver"}
+    neutron_plugin_setup_interface_driver $Q_DHCP_CONF_FILE
     iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_driver $DHCP_DRIVER
-    iniset $Q_DHCP_CONF_FILE DEFAULT interface_driver $DHCP_INTERFACE_DRIVER
     iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces True
     iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata True
-    if [[ "$MIDONET_API_URI" != "" ]]; then
-        iniset $Q_DHCP_CONF_FILE MIDONET midonet_uri "$MIDONET_API_URI"
-    fi
-    if [[ "$MIDONET_USERNAME" != "" ]]; then
-        iniset $Q_DHCP_CONF_FILE MIDONET username "$MIDONET_USERNAME"
-    fi
-    if [[ "$MIDONET_PASSWORD" != "" ]]; then
-        iniset $Q_DHCP_CONF_FILE MIDONET password "$MIDONET_PASSWORD"
-    fi
-    if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
-        iniset $Q_DHCP_CONF_FILE MIDONET project_id "$MIDONET_PROJECT_ID"
-    fi
 }
 
 function neutron_plugin_configure_l3_agent() {
@@ -78,8 +65,8 @@
 }
 
 function neutron_plugin_setup_interface_driver() {
-    # May change in the future
-    :
+    local conf_file=$1
+    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver
 }
 
 function has_neutron_plugin_security_group() {
diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec
index 3806c32..d8d8b7c 100644
--- a/lib/neutron_plugins/nec
+++ b/lib/neutron_plugins/nec
@@ -55,21 +55,26 @@
     _neutron_ovs_base_configure_l3_agent
 }
 
-function neutron_plugin_configure_plugin_agent() {
+function _quantum_plugin_setup_bridge() {
     if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then
         return
     fi
     # Set up integration bridge
     _neutron_ovs_base_setup_bridge $OVS_BRIDGE
-    sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT
     # Generate datapath ID from HOST_IP
-    local dpid=$(printf "0x%07d%03d%03d%03d\n" ${HOST_IP//./ })
+    local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ })
     sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid
     sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure
+    sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT
     if [ -n "$OVS_INTERFACE" ]; then
         sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE
     fi
     _neutron_setup_ovs_tunnels $OVS_BRIDGE
+}
+
+function neutron_plugin_configure_plugin_agent() {
+    _quantum_plugin_setup_bridge
+
     AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent"
 
     _neutron_ovs_base_configure_firewall_driver
diff --git a/lib/neutron_plugins/nicira b/lib/neutron_plugins/nicira
index 7c99b69..87d3c3d 100644
--- a/lib/neutron_plugins/nicira
+++ b/lib/neutron_plugins/nicira
@@ -26,7 +26,6 @@
 }
 
 function neutron_plugin_create_nova_conf() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtOpenVswitchDriver"}
     # if n-cpu is enabled, then setup integration bridge
     if is_service_enabled n-cpu; then
         setup_integration_bridge
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 1214f3b..89db29d 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -73,7 +73,7 @@
 }
 
 function _neutron_ovs_base_configure_nova_vif_driver() {
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    :
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
index 9d3c92f..d4050bb 100644
--- a/lib/neutron_plugins/plumgrid
+++ b/lib/neutron_plugins/plumgrid
@@ -9,8 +9,7 @@
 #source $TOP_DIR/lib/neutron_plugins/ovs_base
 
 function neutron_plugin_create_nova_conf() {
-
-    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"}
+    :
 }
 
 function neutron_plugin_setup_interface_driver() {
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
new file mode 100644
index 0000000..629f3b7
--- /dev/null
+++ b/lib/neutron_plugins/services/metering
@@ -0,0 +1,30 @@
+# Neutron metering plugin
+# ---------------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+AGENT_METERING_BINARY="$NEUTRON_BIN_DIR/neutron-metering-agent"
+METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin"
+
+function neutron_agent_metering_configure_common() {
+    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+        Q_SERVICE_PLUGIN_CLASSES=$METERING_PLUGIN
+    else
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$METERING_PLUGIN"
+    fi
+}
+
+function neutron_agent_metering_configure_agent() {
+    METERING_AGENT_CONF_PATH=/etc/neutron/services/metering
+    mkdir -p $METERING_AGENT_CONF_PATH
+
+    METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini"
+
+    cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema
index 5b5c459..9efd3f6 100644
--- a/lib/neutron_thirdparty/trema
+++ b/lib/neutron_thirdparty/trema
@@ -28,7 +28,7 @@
 TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info}
 
 TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf
-TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch
+TREMA_SS_APACHE_CONFIG=/etc/apache2/sites-available/sliceable_switch.conf
 
 # configure_trema - Set config files, create data dirs, etc
 function configure_trema() {
diff --git a/lib/nova b/lib/nova
index 809f8e5..5fd0beb 100644
--- a/lib/nova
+++ b/lib/nova
@@ -2,22 +2,23 @@
 # Functions to control the configuration and operation of the **Nova** service
 
 # Dependencies:
-# ``functions`` file
-# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# ``LIBVIRT_TYPE`` must be defined
-# ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
-# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# - ``LIBVIRT_TYPE`` must be defined
+# - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
+# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_nova
-# configure_nova
-# create_nova_conf
-# init_nova
-# start_nova
-# stop_nova
-# cleanup_nova
+# - install_nova
+# - configure_nova
+# - create_nova_conf
+# - init_nova
+# - start_nova
+# - stop_nova
+# - cleanup_nova
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -62,15 +63,16 @@
 # NOTE: Set API_RATE_LIMIT="False" to turn OFF rate limiting
 API_RATE_LIMIT=${API_RATE_LIMIT:-"True"}
 
+# Option to enable/disable config drive
+# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive
+FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"}
+
 # Nova supports pluggable schedulers.  The default ``FilterScheduler``
 # should work in most cases.
 SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 
 QEMU_CONF=/etc/libvirt/qemu.conf
 
-NOVNC_DIR=$DEST/noVNC
-SPICE_DIR=$DEST/spice-html5
-
 # Set default defaults here as some hypervisor drivers override these
 PUBLIC_INTERFACE_DEFAULT=br100
 GUEST_INTERFACE_DEFAULT=eth0
@@ -193,7 +195,7 @@
 
     # Set up the rootwrap sudoers for nova
     TEMPFILE=`mktemp`
-    echo "$USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
     chmod 0440 $TEMPFILE
     sudo chown root:root $TEMPFILE
     sudo mv $TEMPFILE /etc/sudoers.d/nova-rootwrap
@@ -221,10 +223,9 @@
         # Comment out the keystone configs in Nova's api-paste.ini.
         # We are using nova.conf to configure this instead.
         inicomment $NOVA_API_PASTE_INI filter:authtoken auth_host
-        if is_service_enabled tls-proxy; then
-            inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol
-        fi
+        inicomment $NOVA_API_PASTE_INI filter:authtoken auth_protocol
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_tenant_name
+        inicomment $NOVA_API_PASTE_INI filter:authtoken cafile
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_user
         inicomment $NOVA_API_PASTE_INI filter:authtoken admin_password
     fi
@@ -397,10 +398,9 @@
         # Add keystone authtoken configuration
 
         iniset $NOVA_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
-        if is_service_enabled tls-proxy; then
-            iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-        fi
+        iniset $NOVA_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
         iniset $NOVA_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+        iniset $NOVA_CONF keystone_authtoken cafile $KEYSTONE_SSL_CA
         iniset $NOVA_CONF keystone_authtoken admin_user nova
         iniset $NOVA_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
     fi
@@ -427,6 +427,9 @@
     if [ "$API_RATE_LIMIT" != "True" ]; then
         iniset $NOVA_CONF DEFAULT api_rate_limit "False"
     fi
+    if [ "$FORCE_CONFIG_DRIVE" != "False" ]; then
+        iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE"
+    fi
     # Format logging
     if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
         setup_colorized_logging $NOVA_CONF DEFAULT
@@ -586,6 +589,30 @@
         install_nova_hypervisor
     fi
 
+    if is_service_enabled n-novnc; then
+        # a websockets/html5 or flash powered VNC console for vm instances
+        NOVNC_FROM_PACKAGE=`trueorfalse False $NOVNC_FROM_PACKAGE`
+        if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
+            NOVNC_WEB_DIR=/usr/share/novnc
+            install_package novnc
+        else
+            NOVNC_WEB_DIR=$DEST/noVNC
+            git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
+        fi
+    fi
+
+    if is_service_enabled n-spice; then
+        # a websockets/html5 or flash powered SPICE console for vm instances
+        SPICE_FROM_PACKAGE=`trueorfalse True $SPICE_FROM_PACKAGE`
+        if [ "$SPICE_FROM_PACKAGE" = "True" ]; then
+            SPICE_WEB_DIR=/usr/share/spice-html5
+            install_package spice-html5
+        else
+            SPICE_WEB_DIR=$DEST/spice-html5
+            git_clone $SPICE_REPO $SPICE_WEB_DIR $SPICE_BRANCH
+        fi
+    fi
+
     git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
     setup_develop $NOVA_DIR
     sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion
@@ -613,52 +640,56 @@
 
 # start_nova_compute() - Start the compute process
 function start_nova_compute() {
-    NOVA_CONF_BOTTOM=$NOVA_CONF
+    if is_service_enabled n-cell; then
+        local compute_cell_conf=$NOVA_CELLS_CONF
+    else
+        local compute_cell_conf=$NOVA_CONF
+    fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # Use 'sg' to execute nova-compute as a member of the **$LIBVIRT_GROUP** group.
-        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM'"
+        screen_it n-cpu "cd $NOVA_DIR && sg $LIBVIRT_GROUP '$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf'"
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
-            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
+            screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
         done
     else
         if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
             start_nova_hypervisor
         fi
-        screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF_BOTTOM"
+        screen_it n-cpu "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf"
     fi
 }
 
 # start_nova() - Start running processes, including screen
 function start_nova_rest() {
-    NOVA_CONF_BOTTOM=$NOVA_CONF
-
-    # ``screen_it`` checks ``is_service_enabled``, it is not needed here
-    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor"
-
+    local api_cell_conf=$NOVA_CONF
     if is_service_enabled n-cell; then
-        NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF
-        screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
-        screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
-        screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
+        local compute_cell_conf=$NOVA_CELLS_CONF
+    else
+        local compute_cell_conf=$NOVA_CONF
     fi
 
-    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
-    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $NOVA_CONF_BOTTOM"
-    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $NOVA_CONF_BOTTOM"
-    screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $NOVA_CONF_BOTTOM"
+    # ``screen_it`` checks ``is_service_enabled``, it is not needed here
+    screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
+    screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
+    screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
 
-    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $NOVA_CONF --web $NOVNC_DIR"
-    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $NOVA_CONF"
-    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $NOVA_CONF --web $SPICE_DIR"
-    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth"
+    screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
+    screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+    screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
+    screen_it n-api-meta "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
+
+    screen_it n-novnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-novncproxy --config-file $api_cell_conf --web $NOVNC_WEB_DIR"
+    screen_it n-xvnc "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-xvpvncproxy --config-file $api_cell_conf"
+    screen_it n-spice "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-spicehtml5proxy --config-file $api_cell_conf --web $SPICE_WEB_DIR"
+    screen_it n-cauth "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
 
     # Starting the nova-objectstore only if swift3 service is not enabled.
     # Swift will act as s3 objectstore.
     is_service_enabled swift3 || \
-        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore"
+        screen_it n-obj "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf"
 }
 
 function start_nova() {
@@ -671,7 +702,7 @@
     # Kill the nova screen windows
     # Some services are listed here twice since more than one instance
     # of a service may be running in certain configs.
-    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cond n-cell n-cell n-api-meta; do
+    for serv in n-api n-cpu n-crt n-net n-sch n-novnc n-xvnc n-cauth n-spice n-cond n-cell n-cell n-api-meta; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
     if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then
@@ -683,6 +714,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/nova_plugins/hypervisor-docker b/lib/nova_plugins/hypervisor-docker
index 427554b..0153953 100644
--- a/lib/nova_plugins/hypervisor-docker
+++ b/lib/nova_plugins/hypervisor-docker
@@ -2,11 +2,13 @@
 # Configure the Docker hypervisor
 
 # Enable with:
-# VIRT_DRIVER=docker
+#
+#   VIRT_DRIVER=docker
 
 # Dependencies:
-# ``functions`` file
-# ``nova`` and ``glance`` configurations
+#
+# - ``functions`` file
+# - ``nova`` and ``glance`` configurations
 
 # install_nova_hypervisor - install any external requirements
 # configure_nova_hypervisor - make configuration changes, including those to other services
@@ -24,8 +26,6 @@
 
 # Set up default directories
 DOCKER_DIR=$DEST/docker
-DOCKER_REPO=${DOCKER_REPO:-https://github.com/dotcloud/openstack-docker.git}
-DOCKER_BRANCH=${DOCKER_BRANCH:-master}
 
 DOCKER_UNIX_SOCKET=/var/run/docker.sock
 DOCKER_PID_FILE=/var/run/docker.pid
@@ -37,7 +37,6 @@
 DOCKER_REGISTRY_IMAGE_NAME=docker-registry
 DOCKER_REPOSITORY_NAME=${SERVICE_HOST}:${DOCKER_REGISTRY_PORT}/${DOCKER_IMAGE_NAME}
 
-DOCKER_PACKAGE_VERSION=${DOCKER_PACKAGE_VERSION:-0.6.1}
 DOCKER_APT_REPO=${DOCKER_APT_REPO:-https://get.docker.io/ubuntu}
 
 
@@ -54,14 +53,8 @@
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor() {
-    git_clone $DOCKER_REPO $DOCKER_DIR $DOCKER_BRANCH
-
-    ln -snf ${DOCKER_DIR}/nova-driver $NOVA_DIR/nova/virt/docker
-
     iniset $NOVA_CONF DEFAULT compute_driver docker.DockerDriver
     iniset $GLANCE_API_CONF DEFAULT container_formats ami,ari,aki,bare,ovf,docker
-
-    sudo cp -p ${DOCKER_DIR}/nova-driver/docker.filters $NOVA_CONF_DIR/rootwrap.d
 }
 
 # install_nova_hypervisor() - Install external components
@@ -72,7 +65,7 @@
     fi
 
     # Make sure Docker is installed
-    if ! is_package_installed lxc-docker-${DOCKER_PACKAGE_VERSION}; then
+    if ! is_package_installed lxc-docker; then
         die $LINENO "Docker is not installed.  Please run tools/docker/install_docker.sh"
     fi
 
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 6fae0b1..6f90f4a 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -7,6 +7,7 @@
 # Dependencies:
 # ``functions`` file
 # ``nova`` configuration
+# ``STACK_USER`` has to be defined
 
 # install_nova_hypervisor - install any external requirements
 # configure_nova_hypervisor - make configuration changes, including those to other services
@@ -68,7 +69,7 @@
             # with 'unix-group:$group'.
             sudo bash -c "cat <<EOF >/etc/polkit-1/localauthority/50-local.d/50-libvirt-remote-access.pkla
 [libvirt Management Access]
-Identity=unix-user:$USER
+Identity=unix-user:$STACK_USER
 Action=org.libvirt.unix.manage
 ResultAny=yes
 ResultInactive=yes
diff --git a/lib/nova_plugins/hypervisor-powervm b/lib/nova_plugins/hypervisor-powervm
deleted file mode 100644
index 561dd9f..0000000
--- a/lib/nova_plugins/hypervisor-powervm
+++ /dev/null
@@ -1,76 +0,0 @@
-# lib/nova_plugins/hypervisor-powervm
-# Configure the PowerVM hypervisor
-
-# Enable with:
-# VIRT_DRIVER=powervm
-
-# Dependencies:
-# ``functions`` file
-# ``nova`` configuration
-
-# install_nova_hypervisor - install any external requirements
-# configure_nova_hypervisor - make configuration changes, including those to other services
-# start_nova_hypervisor - start any external services
-# stop_nova_hypervisor - stop any external services
-# cleanup_nova_hypervisor - remove transient data and cache
-
-# Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-
-# Entry Points
-# ------------
-
-# clean_nova_hypervisor - Clean up an installation
-function cleanup_nova_hypervisor() {
-    # This function intentionally left blank
-    :
-}
-
-# configure_nova_hypervisor - Set config files, create data dirs, etc
-function configure_nova_hypervisor() {
-    POWERVM_MGR_TYPE=${POWERVM_MGR_TYPE:-"ivm"}
-    POWERVM_MGR_HOST=${POWERVM_MGR_HOST:-"powervm.host"}
-    POWERVM_MGR_USER=${POWERVM_MGR_USER:-"padmin"}
-    POWERVM_MGR_PASSWD=${POWERVM_MGR_PASSWD:-"password"}
-    POWERVM_IMG_REMOTE_PATH=${POWERVM_IMG_REMOTE_PATH:-"/tmp"}
-    POWERVM_IMG_LOCAL_PATH=${POWERVM_IMG_LOCAL_PATH:-"/tmp"}
-    iniset $NOVA_CONF DEFAULT compute_driver nova.virt.powervm.PowerVMDriver
-    iniset $NOVA_CONF DEFAULT powervm_mgr_type $POWERVM_MGR_TYPE
-    iniset $NOVA_CONF DEFAULT powervm_mgr $POWERVM_MGR_HOST
-    iniset $NOVA_CONF DEFAULT powervm_mgr_user $POWERVM_MGR_USER
-    iniset $NOVA_CONF DEFAULT powervm_mgr_passwd $POWERVM_MGR_PASSWD
-    iniset $NOVA_CONF DEFAULT powervm_img_remote_path $POWERVM_IMG_REMOTE_PATH
-    iniset $NOVA_CONF DEFAULT powervm_img_local_path $POWERVM_IMG_LOCAL_PATH
-}
-
-# install_nova_hypervisor() - Install external components
-function install_nova_hypervisor() {
-    # This function intentionally left blank
-    :
-}
-
-# start_nova_hypervisor - Start any required external services
-function start_nova_hypervisor() {
-    # This function intentionally left blank
-    :
-}
-
-# stop_nova_hypervisor - Stop any external services
-function stop_nova_hypervisor() {
-    # This function intentionally left blank
-    :
-}
-
-
-# Restore xtrace
-$MY_XTRACE
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/oslo b/lib/oslo
index f77a4fa..816ae9a 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -6,11 +6,12 @@
 # pre-released versions of oslo libraries.
 
 # Dependencies:
-# ``functions`` file
+#
+# - ``functions`` file
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_oslo
+# - install_oslo
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -52,6 +53,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/rpc_backend b/lib/rpc_backend
index a323d64..ae83e85 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -3,15 +3,16 @@
 # rpc backend settings
 
 # Dependencies:
-# ``functions`` file
-# ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used
+#
+# - ``functions`` file
+# - ``RABBIT_{HOST|PASSWORD}`` must be defined when RabbitMQ is used
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# check_rpc_backend
-# install_rpc_backend
-# restart_rpc_backend
-# iniset_rpc_backend
+# - check_rpc_backend
+# - install_rpc_backend
+# - restart_rpc_backend
+# - iniset_rpc_backend
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -200,6 +201,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/savanna b/lib/savanna
new file mode 100644
index 0000000..e9dbe72
--- /dev/null
+++ b/lib/savanna
@@ -0,0 +1,97 @@
+# lib/savanna
+
+# Dependencies:
+# ``functions`` file
+# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# ``ADMIN_{TENANT_NAME|PASSWORD}`` must be defined
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_savanna
+# configure_savanna
+# start_savanna
+# stop_savanna
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# Set up default repos
+SAVANNA_REPO=${SAVANNA_REPO:-${GIT_BASE}/openstack/savanna.git}
+SAVANNA_BRANCH=${SAVANNA_BRANCH:-master}
+
+# Set up default directories
+SAVANNA_DIR=$DEST/savanna
+SAVANNA_CONF_DIR=${SAVANNA_CONF_DIR:-/etc/savanna}
+SAVANNA_CONF_FILE=savanna.conf
+ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
+ADMIN_NAME=${ADMIN_NAME:-admin}
+ADMIN_PASSWORD=${ADMIN_PASSWORD:-nova}
+SAVANNA_DEBUG=${SAVANNA_DEBUG:-True}
+
+# Support entry points installation of console scripts
+if [[ -d $SAVANNA_DIR/bin ]]; then
+    SAVANNA_BIN_DIR=$SAVANNA_DIR/bin
+else
+    SAVANNA_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+# Functions
+# ---------
+
+# configure_savanna() - Set config files, create data dirs, etc
+function configure_savanna() {
+
+    if [[ ! -d $SAVANNA_CONF_DIR ]]; then
+        sudo mkdir -p $SAVANNA_CONF_DIR
+    fi
+    sudo chown $STACK_USER $SAVANNA_CONF_DIR
+
+    # Copy over savanna configuration file and configure common parameters.
+    cp $SAVANNA_DIR/etc/savanna/savanna.conf.sample $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE
+
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_password $ADMIN_PASSWORD
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_username $ADMIN_NAME
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT os_admin_tenant_name $ADMIN_TENANT_NAME
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT debug $SAVANNA_DEBUG
+
+    recreate_database savanna utf8
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database sql_connection `database_connection_url savanna`
+    inicomment $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE database connection
+
+    if is_service_enabled neutron; then
+        iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_neutron true
+        iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_floating_ips true
+    fi
+
+    iniset $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE DEFAULT use_syslog $SYSLOG
+}
+
+# install_savanna() - Collect source and prepare
+function install_savanna() {
+    git_clone $SAVANNA_REPO $SAVANNA_DIR $SAVANNA_BRANCH
+    setup_develop $SAVANNA_DIR
+}
+
+# start_savanna() - Start running processes, including screen
+function start_savanna() {
+    screen_it savanna "cd $SAVANNA_DIR && $SAVANNA_BIN_DIR/savanna-api --config-file $SAVANNA_CONF_DIR/$SAVANNA_CONF_FILE"
+}
+
+# stop_savanna() - Stop running processes
+function stop_savanna() {
+    # Kill the Savanna screen windows
+    screen -S $SCREEN_NAME -p savanna -X kill
+}
+
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/savanna-dashboard b/lib/savanna-dashboard
new file mode 100644
index 0000000..e967622
--- /dev/null
+++ b/lib/savanna-dashboard
@@ -0,0 +1,71 @@
+# lib/savanna-dashboard
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``SERVICE_HOST``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_savanna_dashboard
+# - configure_savanna_dashboard
+# - cleanup_savanna_dashboard
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+source $TOP_DIR/lib/horizon
+
+# Defaults
+# --------
+
+# Set up default repos
+SAVANNA_DASHBOARD_REPO=${SAVANNA_DASHBOARD_REPO:-${GIT_BASE}/openstack/savanna-dashboard.git}
+SAVANNA_DASHBOARD_BRANCH=${SAVANNA_DASHBOARD_BRANCH:-master}
+
+SAVANNA_PYTHONCLIENT_REPO=${SAVANNA_PYTHONCLIENT_REPO:-${GIT_BASE}/openstack/python-savannaclient.git}
+SAVANNA_PYTHONCLIENT_BRANCH=${SAVANNA_PYTHONCLIENT_BRANCH:-master}
+
+# Set up default directories
+SAVANNA_DASHBOARD_DIR=$DEST/savanna_dashboard
+SAVANNA_PYTHONCLIENT_DIR=$DEST/python-savannaclient
+
+# Functions
+# ---------
+
+function configure_savanna_dashboard() {
+
+    echo -e "SAVANNA_URL = \"http://$SERVICE_HOST:8386/v1.1\"\nAUTO_ASSIGNMENT_ENABLED = False" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    echo -e "HORIZON_CONFIG['dashboards'] += ('savanna',)\nINSTALLED_APPS += ('savannadashboard',)" >> $HORIZON_DIR/openstack_dashboard/settings.py
+
+    if is_service_enabled neutron; then
+        echo -e "SAVANNA_USE_NEUTRON = True" >> $HORIZON_DIR/openstack_dashboard/local/local_settings.py
+    fi
+}
+
+# install_savanna_dashboard() - Collect source and prepare
+function install_savanna_dashboard() {
+    install_python_savannaclient
+    git_clone $SAVANNA_DASHBOARD_REPO $SAVANNA_DASHBOARD_DIR $SAVANNA_DASHBOARD_BRANCH
+    setup_develop $SAVANNA_DASHBOARD_DIR
+}
+
+function install_python_savannaclient() {
+    git_clone $SAVANNA_PYTHONCLIENT_REPO $SAVANNA_PYTHONCLIENT_DIR $SAVANNA_PYTHONCLIENT_BRANCH
+    setup_develop $SAVANNA_PYTHONCLIENT_DIR
+}
+
+# cleanup_savanna_dashboard() - Remove Savanna entries from Horizon's settings.py
+function cleanup_savanna_dashboard() {
+    sed -i '/savanna/d' $HORIZON_DIR/openstack_dashboard/settings.py
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
+
diff --git a/lib/stackforge b/lib/stackforge
new file mode 100644
index 0000000..718b818
--- /dev/null
+++ b/lib/stackforge
@@ -0,0 +1,67 @@
+# lib/stackforge
+#
+# Functions to install stackforge libraries that we depend on so
+# that we can try their git versions during devstack gate.
+#
+# This is appropriate for python libraries that release to pypi and are
+# expected to be used beyond OpenStack, but are requirements
+# for core services in global-requirements.
+#    * wsme
+#    * pecan
+#
+# This is not appropriate for stackforge projects which are early stage
+# OpenStack tools
+
+# Dependencies:
+# ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_stackforge
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+WSME_DIR=$DEST/wsme
+PECAN_DIR=$DEST/pecan
+
+# Entry Points
+# ------------
+
+# install_stackforge() - Collect source and prepare
+function install_stackforge() {
+    # TODO(sdague): remove this once we get to Icehouse, this just makes
+    # for a smoother transition of existing users.
+    cleanup_stackforge
+
+    git_clone $WSME_REPO $WSME_DIR $WSME_BRANCH
+    setup_develop_no_requirements_update $WSME_DIR
+
+    git_clone $PECAN_REPO $PECAN_DIR $PECAN_BRANCH
+    setup_develop_no_requirements_update $PECAN_DIR
+}
+
+# cleanup_stackforge() - purge possibly old versions of stackforge libraries
+function cleanup_stackforge() {
+    # this means we've got an old version installed, lets get rid of it
+    # otherwise python hates itself
+    for lib in wsme pecan; do
+        if ! python -c "import $lib" 2>/dev/null; then
+            echo "Found old $lib... removing to ensure consistency"
+            local PIP_CMD=$(get_pip_command)
+            pip_install $lib
+            sudo $PIP_CMD uninstall -y $lib
+        fi
+    done
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/swift b/lib/swift
index c338375..5ff6055 100644
--- a/lib/swift
+++ b/lib/swift
@@ -2,22 +2,24 @@
 # Functions to control the configuration and operation of the **Swift** service
 
 # Dependencies:
-# ``functions`` file
-# ``apache`` file
-# ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined
-# ``STACK_USER`` must be defined
-# ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
-# ``lib/keystone`` file
+#
+# - ``functions`` file
+# - ``apache`` file
+# - ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined
+# - ``STACK_USER`` must be defined
+# - ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
+# - ``lib/keystone`` file
+#
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_swift
-# _config_swift_apache_wsgi
-# configure_swift
-# init_swift
-# start_swift
-# stop_swift
-# cleanup_swift
-# _cleanup_swift_apache_wsgi
+# - install_swift
+# - _config_swift_apache_wsgi
+# - configure_swift
+# - init_swift
+# - start_swift
+# - stop_swift
+# - cleanup_swift
+# - _cleanup_swift_apache_wsgi
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -57,9 +59,9 @@
 # kilobytes.
 # Default is 1 gigabyte.
 SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G
-# if tempest enabled the default size is 4 Gigabyte.
+# if tempest enabled the default size is 6 Gigabyte.
 if is_service_enabled tempest; then
-    SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-4G}
+    SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G}
 fi
 
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-$SWIFT_LOOPBACK_DISK_SIZE_DEFAULT}
@@ -72,6 +74,10 @@
 # the end of the pipeline.
 SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST}
 
+# Set ``SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH`` to extras middlewares that need to be at
+# the beginning of the pipeline, before authentication middlewares.
+SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain}
+
 # The ring uses a configurable number of bits from a path’s MD5 hash as
 # a partition index that designates a device. The number of bits kept
 # from the hash is known as the partition power, and 2 to the partition
@@ -90,6 +96,13 @@
 SWIFT_REPLICAS=${SWIFT_REPLICAS:-1}
 SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS})
 
+# Set ``SWIFT_LOG_TOKEN_LENGTH`` to configure how many characters of an auth
+# token should be placed in the logs. When keystone is used with PKI tokens,
+# the token values can be huge, seemingly larger than 2K, at the least. We
+# restrict it here to a default of 12 characters, which should be enough to
+# trace through the logs when looking for its use.
+SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12}
+
 # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
 # Port bases used in port number calclution for the service "nodes"
 # The specified port number will be used, the additinal ports calculated by
@@ -210,7 +223,7 @@
 
 # configure_swift() - Set config files, create data dirs and loop image
 function configure_swift() {
-    local swift_pipeline=" "
+    local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}"
     local node_number
     local swift_node_config
     local swift_log_dir
@@ -219,7 +232,7 @@
     swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
 
     sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server
-    sudo chown -R $USER: ${SWIFT_CONF_DIR}
+    sudo chown -R ${STACK_USER}: ${SWIFT_CONF_DIR}
 
     if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then
         # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
@@ -232,7 +245,7 @@
     # setup) we configure it with our version of rsync.
     sed -e "
         s/%GROUP%/${USER_GROUP}/;
-        s/%USER%/$USER/;
+        s/%USER%/${STACK_USER}/;
         s,%SWIFT_DATA_DIR%,$SWIFT_DATA_DIR,;
     " $FILES/swift/rsyncd.conf | sudo tee /etc/rsyncd.conf
     # rsyncd.conf just prepared for 4 nodes
@@ -246,7 +259,7 @@
     cp ${SWIFT_DIR}/etc/proxy-server.conf-sample ${SWIFT_CONFIG_PROXY_SERVER}
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
-    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${USER}
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER}
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT swift_dir ${SWIFT_CONF_DIR}
@@ -260,18 +273,30 @@
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
 
+    # Devstack is commonly run in a small slow environment, so bump the
+    # timeouts up.
+    # node_timeout is how long between read operations a node takes to
+    # respond to the proxy server
+    # conn_timeout is all about how long it takes a connect() system call to
+    # return
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
+
     # Configure Ceilometer
     if is_service_enabled ceilometer; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift"
         SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer"
     fi
 
+    # Restrict the length of auth tokens in the swift proxy-server logs.
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
+
     # By default Swift will be installed with keystone and tempauth middleware
     # and add the swift3 middleware if its configured for it. The token for
-    # tempauth would be prefixed with the reseller_prefix setting TEMPAUTH_ the
-    # token for keystoneauth would have the standard reseller_prefix AUTH_
+    # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the
+    # token for keystoneauth would have the standard reseller_prefix `AUTH_`
     if is_service_enabled swift3;then
-        swift_pipeline=" swift3 s3token "
+        swift_pipeline+=" swift3 s3token "
     fi
     swift_pipeline+=" authtoken keystoneauth tempauth "
     sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
@@ -283,16 +308,24 @@
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth reseller_prefix "TEMPAUTH"
 
+    # Configure Crossdomain
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain"
+
     # Configure Keystone
     sed -i '/^# \[filter:authtoken\]/,/^# \[filter:keystoneauth\]$/ s/^#[ \t]*//' ${SWIFT_CONFIG_PROXY_SERVER}
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_host $KEYSTONE_AUTH_HOST
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_port $KEYSTONE_AUTH_PORT
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $KEYSTONE_SSL_CA
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR
+    # This causes the authtoken middleware to use the same python logging
+    # adapter provided by the swift proxy-server, so that request transaction
+# IDs will be included in all of its log messages.
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles
@@ -307,6 +340,7 @@
 auth_port = ${KEYSTONE_AUTH_PORT}
 auth_host = ${KEYSTONE_AUTH_HOST}
 auth_protocol = ${KEYSTONE_AUTH_PROTOCOL}
+cafile = ${KEYSTONE_SSL_CA}
 auth_token = ${SERVICE_TOKEN}
 admin_token = ${SERVICE_TOKEN}
 
@@ -318,7 +352,7 @@
     cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
 
-    # This function generates an object/account/proxy configuration
+    # This function generates an object/container/account configuration
     # emulating 4 nodes on different ports
     function generate_swift_config() {
         local swift_node_config=$1
@@ -330,7 +364,7 @@
         node_path=${SWIFT_DATA_DIR}/${node_number}
 
         iniuncomment ${swift_node_config} DEFAULT user
-        iniset ${swift_node_config} DEFAULT user ${USER}
+        iniset ${swift_node_config} DEFAULT user ${STACK_USER}
 
         iniuncomment ${swift_node_config} DEFAULT bind_port
         iniset ${swift_node_config} DEFAULT bind_port ${bind_port}
@@ -401,7 +435,7 @@
     swift_log_dir=${SWIFT_DATA_DIR}/logs
     rm -rf ${swift_log_dir}
     mkdir -p ${swift_log_dir}/hourly
-    sudo chown -R $USER:adm ${swift_log_dir}
+    sudo chown -R ${STACK_USER}:adm ${swift_log_dir}
     sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
         tee /etc/rsyslog.d/10-swift.conf
     if is_apache_enabled_service swift; then
@@ -416,9 +450,9 @@
     # First do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
-    USER_GROUP=$(id -g)
+    USER_GROUP=$(id -g ${STACK_USER})
     sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
-    sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
+    sudo chown -R ${STACK_USER}:${USER_GROUP} ${SWIFT_DATA_DIR}
 
     # Create a loopback disk and format it to XFS.
     if [[ -e ${SWIFT_DISK_IMAGE} ]]; then
@@ -430,7 +464,7 @@
 
     mkdir -p ${SWIFT_DATA_DIR}/drives/images
     sudo touch ${SWIFT_DISK_IMAGE}
-    sudo chown $USER: ${SWIFT_DISK_IMAGE}
+    sudo chown ${STACK_USER}: ${SWIFT_DISK_IMAGE}
 
     truncate -s ${SWIFT_LOOPBACK_DISK_SIZE} ${SWIFT_DISK_IMAGE}
 
@@ -453,9 +487,9 @@
         node_device=${node}/sdb1
         [[ -d $node ]] && continue
         [[ -d $drive ]] && continue
-        sudo install -o ${USER} -g $USER_GROUP -d $drive
-        sudo install -o ${USER} -g $USER_GROUP -d $node_device
-        sudo chown -R $USER: ${node}
+        sudo install -o ${STACK_USER} -g $USER_GROUP -d $drive
+        sudo install -o ${STACK_USER} -g $USER_GROUP -d $node_device
+        sudo chown -R ${STACK_USER}: ${node}
     done
 }
 # create_swift_accounts() - Set up standard swift accounts and extra
@@ -622,6 +656,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/tempest b/lib/tempest
index 8e4e521..803b740 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -2,34 +2,38 @@
 # Install and configure Tempest
 
 # Dependencies:
-# ``functions`` file
-# ``lib/nova`` service is running
-# <list other global vars that are assumed to be defined>
-# - ``DEST``, ``FILES``
-# - ``ADMIN_PASSWORD``
-# - ``DEFAULT_IMAGE_NAME``
-# - ``S3_SERVICE_PORT``
-# - ``SERVICE_HOST``
-# - ``BASE_SQL_CONN`` ``lib/database`` declares
-# - ``PUBLIC_NETWORK_NAME``
-# - ``Q_USE_NAMESPACE``
-# - ``Q_ROUTER_NAME``
-# - ``VIRT_DRIVER``
-# - ``LIBVIRT_TYPE``
-# - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
+#
+# - ``functions`` file
+# - ``lib/nova`` service is running
+# - Global vars that are assumed to be defined:
+#   - ``DEST``, ``FILES``
+#   - ``ADMIN_PASSWORD``
+#   - ``DEFAULT_IMAGE_NAME``
+#   - ``S3_SERVICE_PORT``
+#   - ``SERVICE_HOST``
+#   - ``BASE_SQL_CONN`` ``lib/database`` declares
+#   - ``PUBLIC_NETWORK_NAME``
+#   - ``Q_USE_NAMESPACE``
+#   - ``Q_ROUTER_NAME``
+#   - ``VIRT_DRIVER``
+#   - ``LIBVIRT_TYPE``
+#   - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
+#
 # Optional Dependencies:
-# ALT_* (similar vars exists in keystone_data.sh)
-# ``LIVE_MIGRATION_AVAILABLE``
-# ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
-# ``DEFAULT_INSTANCE_TYPE``
-# ``DEFAULT_INSTANCE_USER``
-# ``CINDER_MULTI_LVM_BACKEND``
-# ``HEAT_CREATE_TEST_IMAGE``
+#
+# - ``ALT_*`` (similar vars exists in keystone_data.sh)
+# - ``LIVE_MIGRATION_AVAILABLE``
+# - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
+# - ``DEFAULT_INSTANCE_TYPE``
+# - ``DEFAULT_INSTANCE_USER``
+# - ``CINDER_MULTI_LVM_BACKEND``
+# - ``HEAT_CREATE_TEST_IMAGE``
+#
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_tempest
-# configure_tempest
-# init_tempest
+# - install_tempest
+# - configure_tempest
+# - init_tempest
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -48,7 +52,7 @@
 NOVA_SOURCE_DIR=$DEST/nova
 
 BUILD_INTERVAL=1
-BUILD_TIMEOUT=400
+BUILD_TIMEOUT=196
 
 
 BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-0.3.1"
@@ -69,12 +73,14 @@
     local password
     local line
     local flavors
+    local available_flavors
     local flavors_ref
     local flavor_lines
     local public_network_id
     local public_router_id
     local tenant_networks_reachable
     local boto_instance_type="m1.tiny"
+    local ssh_connect_method="fixed"
 
     # TODO(afazekas):
     # sudo python setup.py deploy
@@ -138,10 +144,15 @@
     # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior
     # Tempest creates instane types for himself
     if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
-        nova flavor-create m1.nano 42 64 0 1
+        available_flavors=$(nova flavor-list)
+        if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
+            nova flavor-create m1.nano 42 64 0 1
+        fi
         flavor_ref=42
         boto_instance_type=m1.nano
-        nova flavor-create m1.micro 84 128 0 1
+        if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
+            nova flavor-create m1.micro 84 128 0 1
+        fi
         flavor_ref_alt=84
     else
         # Check Nova for existing flavors and, if set, look for the
@@ -182,10 +193,13 @@
 
     if [ "$Q_USE_NAMESPACE" != "False" ]; then
         tenant_networks_reachable=false
+        ssh_connect_method="floating"
     else
         tenant_networks_reachable=true
     fi
 
+    ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
+
     if is_service_enabled q-l3; then
         public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
             awk '{print $2}')
@@ -248,6 +262,7 @@
     iniset $TEMPEST_CONF compute flavor_ref_alt $flavor_ref_alt
     iniset $TEMPEST_CONF compute live_migration_available ${LIVE_MIGRATION_AVAILABLE:-False}
     iniset $TEMPEST_CONF compute use_block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
+    iniset $TEMPEST_CONF compute ssh_connect_method $ssh_connect_method
 
     # Compute admin
     iniset $TEMPEST_CONF "compute-admin" password "$password" # DEPRECATED
@@ -296,7 +311,7 @@
     iniset $TEMPEST_CONF cli cli_dir $NOVA_BIN_DIR
 
     # service_available
-    for service in nova cinder glance neutron swift heat horizon ; do
+    for service in nova cinder glance neutron swift heat horizon ceilometer ironic savanna; do
         if is_service_enabled $service ; then
             iniset $TEMPEST_CONF service_available $service "True"
         else
@@ -345,6 +360,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/template b/lib/template
index 72904fe..629e110 100644
--- a/lib/template
+++ b/lib/template
@@ -3,18 +3,19 @@
 # <do not include this template file in ``stack.sh``!>
 
 # Dependencies:
-# ``functions`` file
-# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# <list other global vars that are assumed to be defined>
+#
+# - ``functions`` file
+# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# - <list other global vars that are assumed to be defined>
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# install_XXXX
-# configure_XXXX
-# init_XXXX
-# start_XXXX
-# stop_XXXX
-# cleanup_XXXX
+# - install_XXXX
+# - configure_XXXX
+# - init_XXXX
+# - start_XXXX
+# - stop_XXXX
+# - cleanup_XXXX
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -79,6 +80,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/tls b/lib/tls
index f7dcffa..6134fa1 100644
--- a/lib/tls
+++ b/lib/tls
@@ -1,25 +1,29 @@
 # lib/tls
 # Functions to control the configuration and operation of the TLS proxy service
 
-# Dependencies:
 # !! source _before_ any services that use ``SERVICE_HOST``
-# ``functions`` file
-# ``DEST``, ``DATA_DIR`` must be defined
-# ``HOST_IP``, ``SERVICE_HOST``
-# ``KEYSTONE_TOKEN_FORMAT`` must be defined
+#
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR`` must be defined
+# - ``HOST_IP``, ``SERVICE_HOST``
+# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
 
 # Entry points:
-# configure_CA
-# init_CA
+#
+# - configure_CA
+# - init_CA
 
-# configure_proxy
-# start_tls_proxy
+# - configure_proxy
+# - start_tls_proxy
 
-# make_root_ca
-# make_int_ca
-# new_cert $INT_CA_DIR int-server "abc"
-# start_tls_proxy HOST_IP 5000 localhost 5000
-
+# - make_root_ca
+# - make_int_ca
+# - new_cert $INT_CA_DIR int-server "abc"
+# - start_tls_proxy HOST_IP 5000 localhost 5000
+# - ensure_certificates
+# - is_ssl_enabled_service
 
 # Defaults
 # --------
@@ -306,6 +310,53 @@
 }
 
 
+# Certificate Input Configuration
+# ===============================
+
+# check to see if the service(s) specified are to be SSL enabled.
+#
+# Multiple services specified as arguments are ``OR``'ed together; the test
+# is a short-circuit boolean, i.e. it returns on the first match.
+#
+# Uses global ``SSL_ENABLED_SERVICES``
+function is_ssl_enabled_service() {
+    services=$@
+    for service in ${services}; do
+        [[ ,${SSL_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+    done
+    return 1
+}
+
+
+# Ensure that the certificates for a service are in place. This function does
+# not check that a service is SSL enabled, this should already have been
+# completed.
+#
+# The function expects to find a certificate, key and CA certificate in the
+# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For
+# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and
+# KEYSTONE_SSL_CA. If it does not find these certificates the program will
+# quit.
+function ensure_certificates() {
+    local service=$1
+
+    local cert_var="${service}_SSL_CERT"
+    local key_var="${service}_SSL_KEY"
+    local ca_var="${service}_SSL_CA"
+
+    local cert=${!cert_var}
+    local key=${!key_var}
+    local ca=${!ca_var}
+
+    if [[ !($cert && $key && $ca) ]]; then
+        die $LINENO "Missing either the ${cert_var} ${key_var} or ${ca_var}" \
+                    "variable to enable SSL for ${service}"
+    fi
+
+    cat $ca >> $SSL_BUNDLE_FILE
+}
+
+
 # Proxy Functions
 # ===============
 
@@ -321,6 +372,7 @@
 }
 
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/trove b/lib/trove
index 0a19d03..5ba4de5 100644
--- a/lib/trove
+++ b/lib/trove
@@ -29,7 +29,6 @@
 TROVECLIENT_DIR=$DEST/python-troveclient
 TROVE_CONF_DIR=/etc/trove
 TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove
-TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
 TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove}
 TROVE_BIN_DIR=/usr/local/bin
 
@@ -102,6 +101,7 @@
     iniset $TROVE_API_PASTE_INI filter:tokenauth auth_host $KEYSTONE_AUTH_HOST
     iniset $TROVE_API_PASTE_INI filter:tokenauth auth_port $KEYSTONE_AUTH_PORT
     iniset $TROVE_API_PASTE_INI filter:tokenauth auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $TROVE_API_PASTE_INI filter:tokenauth cafile $KEYSTONE_SSL_CA
     iniset $TROVE_API_PASTE_INI filter:tokenauth admin_tenant_name $SERVICE_TENANT_NAME
     iniset $TROVE_API_PASTE_INI filter:tokenauth admin_user trove
     iniset $TROVE_API_PASTE_INI filter:tokenauth admin_password $SERVICE_PASSWORD
@@ -123,6 +123,8 @@
 
     # (Re)create trove taskmanager conf file if needed
     if is_service_enabled tr-tmgr; then
+        TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT//v$IDENTITY_API_VERSION
+
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT rabbit_password $RABBIT_PASSWORD
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT sql_connection `database_connection_url trove`
         iniset $TROVE_CONF_DIR/trove-taskmanager.conf DEFAULT taskmanager_manager trove.taskmanager.manager.Manager
@@ -181,6 +183,7 @@
 # Restore xtrace
 $XTRACE
 
-# Local variables:
-# mode: shell-script
-# End:
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/openrc b/openrc
index 3de7e39..784b00e 100644
--- a/openrc
+++ b/openrc
@@ -18,7 +18,7 @@
 fi
 
 # Find the other rc files
-RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 
 # Import common functions
 source $RC_DIR/functions
@@ -58,6 +58,7 @@
 HOST_IP=${HOST_IP:-127.0.0.1}
 SERVICE_HOST=${SERVICE_HOST:-$HOST_IP}
 SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http}
+KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL}
 
 # Some exercises call glance directly.  On a single-node installation, Glance
 # should be listening on HOST_IP.  If its running elsewhere, it can be set here
@@ -71,13 +72,18 @@
 # the user/tenant has access to - including nova, glance, keystone, swift, ...
 # We currently recommend using the 2.0 *identity api*.
 #
-export OS_AUTH_URL=$SERVICE_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
+export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:5000/v${OS_IDENTITY_API_VERSION}
 
 # Set the pointer to our CA certificate chain.  Harmless if TLS is not used.
-export OS_CACERT=$INT_CA_DIR/ca-chain.pem
+export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem}
 
 # Currently novaclient needs you to specify the *compute api* version.  This
 # needs to match the config of your catalog returned by Keystone.
 export NOVA_VERSION=${NOVA_VERSION:-1.1}
 # In the future this will change names:
 export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION}
+
+# Currently cinderclient needs you to specify the *volume api* version. This
+# needs to match the config of your catalog returned by Keystone.
+export CINDER_VERSION=${CINDER_VERSION:-2}
+export OS_VOLUME_API_VERSION=${OS_VOLUME_API_VERSION:-$CINDER_VERSION}
diff --git a/stack.sh b/stack.sh
index 3c4afd9..0f4329a 100755
--- a/stack.sh
+++ b/stack.sh
@@ -131,7 +131,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then
+if [[ ! ${DISTRO} =~ (oneiric|precise|quantal|raring|saucy|trusty|7.0|wheezy|sid|testing|jessie|f16|f17|f18|f19|opensuse-12.2|rhel6) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -290,6 +290,10 @@
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
+# Reset the bundle of CA certificates
+SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem"
+rm -f $SSL_BUNDLE_FILE
+
 
 # Configure Projects
 # ==================
@@ -299,6 +303,7 @@
 source $TOP_DIR/lib/tls
 source $TOP_DIR/lib/infra
 source $TOP_DIR/lib/oslo
+source $TOP_DIR/lib/stackforge
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
@@ -629,6 +634,11 @@
 # Install oslo libraries that have graduated
 install_oslo
 
+# Install stackforge libraries for testing
+if is_service_enabled stackforge_libs; then
+    install_stackforge
+fi
+
 # Install clients libraries
 install_keystoneclient
 install_glanceclient
@@ -688,16 +698,6 @@
     configure_nova
 fi
 
-if is_service_enabled n-novnc; then
-    # a websockets/html5 or flash powered VNC console for vm instances
-    git_clone $NOVNC_REPO $NOVNC_DIR $NOVNC_BRANCH
-fi
-
-if is_service_enabled n-spice; then
-    # a websockets/html5 or flash powered SPICE console for vm instances
-    git_clone $SPICE_REPO $SPICE_DIR $SPICE_BRANCH
-fi
-
 if is_service_enabled horizon; then
     # dashboard
     install_horizon
@@ -751,6 +751,7 @@
 if [[ $TRACK_DEPENDS = True ]]; then
     $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
     if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
+        echo "Detect some changes for installed packages of pip, in depend tracking mode"
         cat $DEST/requires.diff
     fi
     echo "Ran stack.sh in depend tracking mode, bailing out now"
@@ -802,6 +803,17 @@
 restart_rpc_backend
 
 
+# Export Certificate Authority Bundle
+# ---------------------------------
+
+# If certificates were used and written to the SSL bundle file then these
+# should be exported so clients can validate their connections.
+
+if [ -f $SSL_BUNDLE_FILE ]; then
+    export OS_CACERT=$SSL_BUNDLE_FILE
+fi
+
+
 # Configure database
 # ------------------
 
@@ -1151,6 +1163,7 @@
     start_trove
 fi
 
+
 # Create account rc files
 # =======================
 
@@ -1159,7 +1172,13 @@
 # which is helpful in image bundle steps.
 
 if is_service_enabled nova && is_service_enabled key; then
-    $TOP_DIR/tools/create_userrc.sh -PA --target-dir $TOP_DIR/accrc
+    USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc"
+
+    if [ -f $SSL_BUNDLE_FILE ]; then
+        USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE"
+    fi
+
+    $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS
 fi
 
 
@@ -1235,7 +1254,7 @@
 CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT")
 echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv
 for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \
-    SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP; do
+    SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do
     echo $i=${!i} >>$TOP_DIR/.stackenv
 done
 
diff --git a/stackrc b/stackrc
index 0151672..7eda5a5 100644
--- a/stackrc
+++ b/stackrc
@@ -1,7 +1,7 @@
 # stackrc
 #
 # Find the other rc files
-RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
+RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 
 # Destination path for installation
 DEST=/opt/stack
@@ -62,7 +62,7 @@
 
 # Base GIT Repo URL
 # Another option is http://review.openstack.org/p
-GIT_BASE=${GIT_BASE:-https://github.com}
+GIT_BASE=${GIT_BASE:-git://git.openstack.org}
 
 # metering service
 CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git}
@@ -182,7 +182,7 @@
 NOVNC_BRANCH=${NOVNC_BRANCH:-master}
 
 # ryu service
-RYU_REPO=${RYU_REPO:-${GIT_BASE}/osrg/ryu.git}
+RYU_REPO=${RYU_REPO:-https://github.com/osrg/ryu.git}
 RYU_BRANCH=${RYU_BRANCH:-master}
 
 # a websockets/html5 or flash powered SPICE console for vm instances
@@ -197,6 +197,16 @@
 TROVECLIENT_REPO=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git}
 TROVECLIENT_BRANCH=${TROVECLIENT_BRANCH:-master}
 
+# stackforge libraries that are used by OpenStack core services
+# wsme
+WSME_REPO=${WSME_REPO:-${GIT_BASE}/stackforge/wsme.git}
+WSME_BRANCH=${WSME_BRANCH:-master}
+
+# pecan
+PECAN_REPO=${PECAN_REPO:-${GIT_BASE}/stackforge/pecan.git}
+PECAN_BRANCH=${PECAN_BRANCH:-master}
+
+
 # Nova hypervisor configuration.  We default to libvirt with **kvm** but will
 # drop back to **qemu** if we are unable to load the kvm module.  ``stack.sh`` can
 # also install an **LXC**, **OpenVZ** or **XenAPI** based system.  If xenserver-core
@@ -297,6 +307,9 @@
 # Do not install packages tagged with 'testonly' by default
 INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False}
 
+# Undo requirements changes by global requirements
+UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True}
+
 # Local variables:
 # mode: shell-script
 # End:
diff --git a/tools/build_docs.sh b/tools/build_docs.sh
new file mode 100755
index 0000000..c566e63
--- /dev/null
+++ b/tools/build_docs.sh
@@ -0,0 +1,145 @@
+#!/usr/bin/env bash
+
+# **build_docs.sh** - Build the gh-pages docs for DevStack
+#
+# - Install shocco if not found on PATH
+# - Clone MASTER_REPO branch MASTER_BRANCH
+# - Re-creates ``docs`` directory from existing repo + new generated script docs
+
+# Usage:
+## build_docs.sh [[-b branch] [-p] repo] | .
+## -b branch        The DevStack branch to check out (default is master; ignored if
+##                  repo is not specified)
+## -p               Push the resulting docs tree to the source repo; fatal error if
+##                  repo is not specified
+## repo             The DevStack repository to clone (default is DevStack github repo)
+##                  If a repo is not supplied use the current directory
+##                  (assumed to be a DevStack checkout) as the source.
+## .                Use the current repo and branch (do not use with -p to
+##                  prevent stray files in the workspace being added to the docs)
+
+# Defaults
+# --------
+
+# Source repo/branch for DevStack
+MASTER_REPO=${MASTER_REPO:-https://github.com/openstack-dev/devstack.git}
+MASTER_BRANCH=${MASTER_BRANCH:-master}
+
+# http://devstack.org is a GitHub gh-pages site in the https://github.com/cloudbuilders/devstack.git repo
+GH_PAGES_REPO=git@github.com:cloudbuilders/devstack.git
+
+# Keep track of the devstack directory
+TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
+
+# Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support
+SHOCCO=${SHOCCO:-shocco}
+if ! which shocco; then
+    if [[ ! -x $TOP_DIR/shocco/shocco ]]; then
+        if [[ -z "$INSTALL_SHOCCO" ]]; then
+            echo "shocco not found in \$PATH, please set environment variable SHOCCO"
+            exit 1
+        fi
+        echo "Installing local copy of shocco"
+        if ! which pygmentize; then
+            sudo pip install Pygments
+        fi
+        if ! which rst2html.py; then
+            sudo pip install docutils
+        fi
+        git clone -b rst_support https://github.com/dtroyer/shocco shocco
+        cd shocco
+        ./configure
+        make
+        cd ..
+    fi
+    SHOCCO=$TOP_DIR/shocco/shocco
+fi
+
+# Process command-line args
+while getopts b:p c; do
+    case $c in
+        b)  MASTER_BRANCH=$OPTARG
+            ;;
+        p)  PUSH_REPO=1
+            ;;
+    esac
+done
+shift `expr $OPTIND - 1`
+
+# Sanity check the args
+if [[ "$1" == "." ]]; then
+    REPO=""
+    if [[ -n $PUSH_REPO ]]; then
+        echo "Push not allowed from an active workspace"
+        unset PUSH_REPO
+    fi
+else
+    if [[ -z "$1" ]]; then
+        REPO=$MASTER_REPO
+    else
+        REPO=$1
+    fi
+fi
+
+# Check out a specific DevStack branch
+if [[ -n $REPO ]]; then
+    # Make a workspace
+    TMP_ROOT=$(mktemp -d devstack-docs-XXXX)
+    echo "Building docs in $TMP_ROOT"
+    cd $TMP_ROOT
+
+    # Get the master branch
+    git clone $REPO devstack
+    cd devstack
+    git checkout $MASTER_BRANCH
+fi
+
+# Processing
+# ----------
+
+# Assumption is we are now in the DevStack repo workspace to be processed
+
+# Pull the latest docs branch from devstack.org repo
+if ! [ -d docs ]; then
+    git clone -b gh-pages $GH_PAGES_REPO docs
+fi
+
+# Build list of scripts to process
+FILES=""
+for f in $(find . -name .git -prune -o \( -type f -name \*.sh -not -path \*shocco/\* -print \)); do
+    echo $f
+    FILES+="$f "
+    mkdir -p docs/`dirname $f`;
+    $SHOCCO $f > docs/$f.html
+done
+for f in $(find functions lib samples -type f -name \*); do
+    echo $f
+    FILES+="$f "
+    mkdir -p docs/`dirname $f`;
+    $SHOCCO $f > docs/$f.html
+done
+echo "$FILES" >docs-files
+
+# Switch to the gh_pages repo
+cd docs
+
+# Collect the new generated pages
+find . -name \*.html -print0 | xargs -0 git add
+
+# Push our changes back up to the docs branch
+if ! git diff-index HEAD --quiet; then
+    git commit -a -m "Update script docs"
+    if [[ -n $PUSH_REPO ]]; then
+        git push
+    fi
+fi
+
+# Clean up or report the temp workspace
+if [[ -n $REPO && -n $PUSH_REPO ]]; then
+    rm -rf $TMP_ROOT
+else
+    if [[ -z "$TMP_ROOT" ]]; then
+        TMP_ROOT="$(pwd)"
+    fi
+    echo "Built docs in $TMP_ROOT"
+fi
diff --git a/tools/build_ramdisk.sh b/tools/build_ramdisk.sh
index 2c45568..7372555 100755
--- a/tools/build_ramdisk.sh
+++ b/tools/build_ramdisk.sh
@@ -22,7 +22,7 @@
         umount $MNTDIR
         rmdir $MNTDIR
     fi
-    if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP "]; then
+    if [ -n "$DEV_FILE_TMP" -a -e "$DEV_FILE_TMP" ]; then
         rm -f $DEV_FILE_TMP
     fi
     if [ -n "$IMG_FILE_TMP" -a -e "$IMG_FILE_TMP" ]; then
@@ -84,11 +84,10 @@
     $TOOLS_DIR/get_uec_image.sh $DIST_NAME $CACHEDIR/$DIST_NAME-base.img
 fi
 
-# Finds the next available NBD device
-# Exits script if error connecting or none free
+# Finds and returns full device path for the next available NBD device.
+# Exits script if error connecting or none free.
 # map_nbd image
-# Returns full nbd device path
-function map_nbd {
+function map_nbd() {
     for i in `seq 0 15`; do
         if [ ! -e /sys/block/nbd$i/pid ]; then
             NBD=/dev/nbd$i
@@ -156,7 +155,7 @@
 
     # Pre-create the image file
     # FIXME(dt): This should really get the partition size to
-    #            pre-create the image file
+    # pre-create the image file
     dd if=/dev/zero of=$IMG_FILE_TMP bs=1 count=1 seek=$((2*1024*1024*1024))
     # Create filesystem image for RAM disk
     dd if=${NBD}p1 of=$IMG_FILE_TMP bs=1M
diff --git a/tools/build_tempest.sh b/tools/build_tempest.sh
index 1758e7d..6c527f5 100755
--- a/tools/build_tempest.sh
+++ b/tools/build_tempest.sh
@@ -2,7 +2,7 @@
 #
 # **build_tempest.sh**
 
-# Checkout and prepare a Tempest repo: https://github.com/openstack/tempest.git
+# Checkout and prepare a Tempest repo: git://git.openstack.org/openstack/tempest.git
 
 function usage {
     echo "$0 - Check out and prepare a Tempest repo"
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 2251d1e..50f6592 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -5,7 +5,9 @@
 # Create a user account suitable for running DevStack
 # - create a group named $STACK_USER if it does not exist
 # - create a user named $STACK_USER if it does not exist
+#
 #   - home is $DEST
+#
 # - configure sudo for $STACK_USER
 
 # ``stack.sh`` was never intended to run as root.  It had a hack to do what is
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index 8383fe7..5f4c486 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -43,6 +43,7 @@
 --os-tenant-name <tenant_name>
 --os-tenant-id <tenant_id>
 --os-auth-url <auth_url>
+--os-cacert <cert file>
 --target-dir <target_directory>
 --skip-tenant <tenant-name>
 --debug
@@ -53,7 +54,7 @@
 EOF
 }
 
-if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,help,debug -- "$@")
+if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,skip-tenant:,os-cacert:,help,debug -- "$@")
 then
     #parse error
     display_help
@@ -80,6 +81,7 @@
     --os-tenant-id) export OS_TENANT_ID=$2; shift ;;
     --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;;
     --os-auth-url) export OS_AUTH_URL=$2; shift ;;
+    --os-cacert) export OS_CACERT=$2; shift ;;
     --target-dir) ACCOUNT_DIR=$2; shift ;;
     --debug) set -o xtrace ;;
     -u) MODE=${MODE:-one};  USER_NAME=$2; shift ;;
@@ -201,6 +203,7 @@
 # Openstack Tenant ID = $tenant_id
 export OS_TENANT_NAME="$tenant_name"
 export OS_AUTH_URL="$OS_AUTH_URL"
+export OS_CACERT="$OS_CACERT"
 export EC2_CERT="$ec2_cert"
 export EC2_PRIVATE_KEY="$ec2_private_key"
 export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id)
diff --git a/tools/docker/install_docker.sh b/tools/docker/install_docker.sh
index 483955b..2e5b510 100755
--- a/tools/docker/install_docker.sh
+++ b/tools/docker/install_docker.sh
@@ -38,7 +38,7 @@
 install_package python-software-properties && \
     sudo sh -c "echo deb $DOCKER_APT_REPO docker main > /etc/apt/sources.list.d/docker.list"
 apt_get update
-install_package --force-yes lxc-docker-${DOCKER_PACKAGE_VERSION} socat
+install_package --force-yes lxc-docker socat
 
 # Start the daemon - restart just in case the package ever auto-starts...
 restart_service docker
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 9e65b7c..5fb47dc 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -5,11 +5,15 @@
 # fixup_stuff.sh
 #
 # All distro and package specific hacks go in here
+#
 # - prettytable 0.7.2 permissions are 600 in the package and
 #   pip 1.4 doesn't fix it (1.3 did)
+#
 # - httplib2 0.8 permissions are 600 in the package and
 #   pip 1.4 doesn't fix it (1.3 did)
+#
 # - RHEL6:
+#
 #   - set selinux not enforcing
 #   - (re)start messagebus daemon
 #   - remove distro packages python-crypto and python-lxml
@@ -47,7 +51,7 @@
 
 # Fix prettytable 0.7.2 permissions
 # Don't specify --upgrade so we use the existing package if present
-pip_install prettytable
+pip_install 'prettytable>0.7'
 PACKAGE_DIR=$(get_package_path prettytable)
 # Only fix version 0.7.2
 dir=$(echo $PACKAGE_DIR/prettytable-0.7.2*)
@@ -72,8 +76,7 @@
 if [[ $DISTRO =~ (rhel6) ]]; then
 
     # Disable selinux to avoid configuring to allow Apache access
-    # to Horizon files or run nodejs (LP#1175444)
-    # FIXME(dtroyer): see if this can be skipped without node or if Horizon is not enabled
+    # to Horizon files (LP#1175444)
     if selinuxenabled; then
         sudo setenforce 0
     fi
@@ -90,7 +93,7 @@
         # fresh system via Anaconda and the dependency chain
         # ``cas`` -> ``python-paramiko`` -> ``python-crypto``.
         # ``pip uninstall pycrypto`` will remove the packaged ``.egg-info``
-        #  file but leave most of the actual library files behind in
+        # file but leave most of the actual library files behind in
         # ``/usr/lib64/python2.6/Crypto``. Later ``pip install pycrypto``
         # will install over the packaged files resulting
         # in a useless mess of old, rpm-packaged files and pip-installed files.
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 455323e..6b9b25e 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -26,6 +26,7 @@
 
 # Handle arguments
 
+USE_GET_PIP=${USE_GET_PIP:-0}
 INSTALL_PIP_VERSION=${INSTALL_PIP_VERSION:-"1.4.1"}
 while [[ -n "$1" ]]; do
     case $1 in
@@ -63,7 +64,7 @@
 function install_get_pip() {
     if [[ ! -r $FILES/get-pip.py ]]; then
         (cd $FILES; \
-            curl $PIP_GET_PIP_URL; \
+            curl -O $PIP_GET_PIP_URL; \
         )
     fi
     sudo python $FILES/get-pip.py
diff --git a/tools/xen/functions b/tools/xen/functions
index b0b077d..563303d 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -137,14 +137,14 @@
     local name_label
     name_label=$1
 
-    ! [ -z $(xe network-list name-label="$name_label" --minimal) ]
+    ! [ -z "$(xe network-list name-label="$name_label" --minimal)" ]
 }
 
 function _bridge_exists() {
     local bridge
     bridge=$1
 
-    ! [ -z $(xe network-list bridge="$bridge" --minimal) ]
+    ! [ -z "$(xe network-list bridge="$bridge" --minimal)" ]
 }
 
 function _network_uuid() {
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 9a2f5a8..6ce334b 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -111,12 +111,15 @@
 fi
 
 if parameter_is_specified "FLAT_NETWORK_BRIDGE"; then
-    cat >&2 << EOF
-ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file
-This is considered as an error, as its value will be derived from the
-VM_BRIDGE_OR_NET_NAME variable's value.
+    if [ "$(bridge_for "$VM_BRIDGE_OR_NET_NAME")" != "$(bridge_for "$FLAT_NETWORK_BRIDGE")" ]; then
+        cat >&2 << EOF
+ERROR: FLAT_NETWORK_BRIDGE is specified in localrc file, and either no network
+found on XenServer by searching for networks by that value as name-label or
+bridge name or the network found does not match the network specified by
+VM_BRIDGE_OR_NET_NAME. Please check your localrc file.
 EOF
-    exit 1
+        exit 1
+    fi
 fi
 
 if ! xenapi_is_listening_on "$MGT_BRIDGE_OR_NET_NAME"; then
@@ -271,6 +274,12 @@
 # Max out VCPU count for better performance
 max_vcpus "$GUEST_NAME"
 
+# Wipe out all network cards
+destroy_all_vifs_of "$GUEST_NAME"
+
+# Add only one interface to prepare the guest template
+add_interface "$GUEST_NAME" "$MGT_BRIDGE_OR_NET_NAME" "0"
+
 # start the VM to run the prepare steps
 xe vm-start vm="$GUEST_NAME"
 
@@ -304,7 +313,7 @@
         "xen_integration_bridge=${XEN_INTEGRATION_BRIDGE}"
 fi
 
-FLAT_NETWORK_BRIDGE=$(bridge_for "$VM_BRIDGE_OR_NET_NAME")
+FLAT_NETWORK_BRIDGE="${FLAT_NETWORK_BRIDGE:-$(bridge_for "$VM_BRIDGE_OR_NET_NAME")}"
 append_kernel_cmdline "$GUEST_NAME" "flat_network_bridge=${FLAT_NETWORK_BRIDGE}"
 
 # Add a separate xvdb, if it was requested
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 5347238..0ae2cb7 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -111,8 +111,8 @@
 
 function test_zip_snapshot_location {
     diff \
-    <(zip_snapshot_location "https://github.com/openstack/nova.git" "master") \
-    <(echo "https://github.com/openstack/nova/zipball/master")
+    <(zip_snapshot_location "git://git.openstack.org/openstack/nova.git" "master") \
+    <(echo "git://git.openstack.org/openstack/nova/zipball/master")
 }
 
 function test_create_directory_for_kernels {