Merge "Set virt_use_execmem boolean if SELinux is enabled."
diff --git a/AUTHORS b/AUTHORS
index 4f771ce..22d5f32 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -1,6 +1,7 @@
 Aaron Lee <aaron.lee@rackspace.com>
 Aaron Rosen <arosen@nicira.com>
 Adam Gandelman <adamg@canonical.com>
+Andrew Laski <andrew.laski@rackspace.com>
 Andy Smith <github@anarkystic.com>
 Anthony Young <sleepsonthefloor@gmail.com>
 Armando Migliaccio <armando.migliaccio@eu.citrix.com>
diff --git a/HACKING.rst b/HACKING.rst
index 7262cff..e8f90c7 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -53,9 +53,23 @@
     source $TOP_DIR/openrc
 
 ``stack.sh`` is a rather large monolithic script that flows through from beginning
-to end.  There is a proposal to segment it to put the OpenStack projects
-into their own sub-scripts to better document the projects as a unit rather than
-have it scattered throughout ``stack.sh``.  Someday.
+to end.  The process of breaking it down into project-level sub-scripts has begun
+with the introduction of ``lib/cinder`` and ``lib/ceilometer``.
+
+These library sub-scripts have a number of fixed entry points, some of which may
+just be stubs.  These entry points will be called by ``stack.sh`` in the
+following order::
+
+    install_XXXX
+    configure_XXXX
+    init_XXXX
+    start_XXXX
+    stop_XXXX
+    cleanup_XXXX
+
+There is a sub-script template in ``lib/templates`` to be used in creating new
+service sub-scripts.  The comments in ``<>`` are meta comments describing
+how to use the template and should be removed.
 
 
 Documentation
diff --git a/README.md b/README.md
index fd66e96..872b16b 100644
--- a/README.md
+++ b/README.md
@@ -57,6 +57,22 @@
 
 You can override environment variables used in `stack.sh` by creating file name `localrc`.  It is likely that you will need to do this to tweak your networking configuration should you need to access your cloud from a different host.
 
+# RPC Backend
+
+Multiple RPC backends are available. Currently, this
+includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of
+choice may be selected via the `localrc`.
+
+Note that selecting more than one RPC backend will result in a failure.
+
+Example (ZeroMQ):
+
+    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq"
+
+Example (Qpid):
+
+    ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
+
 # Swift
 
 Swift is not installed by default, you can enable easily by adding this to your `localrc`:
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 8a4f9c1..adc3393 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -125,16 +125,16 @@
 if [ "$VIRT_DRIVER" == "xenserver" ]; then
     echo "TODO(johngarbutt) add tests for add/remove host from pool aggregate"
 fi
-HOST=`nova host-list | grep compute | get_field 1`
+FIRST_HOST=`nova host-list | grep compute | get_field 1 | head -1`
 # Make sure can add two aggregates to same host
-nova aggregate-add-host $AGGREGATE_ID $HOST
-nova aggregate-add-host $AGGREGATE2_ID $HOST
-if nova aggregate-add-host $AGGREGATE2_ID $HOST; then
+nova aggregate-add-host $AGGREGATE_ID $FIRST_HOST
+nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST
+if nova aggregate-add-host $AGGREGATE2_ID $FIRST_HOST; then
     echo "ERROR could add duplicate host to single aggregate"
     exit -1
 fi
-nova aggregate-remove-host $AGGREGATE2_ID $HOST
-nova aggregate-remove-host $AGGREGATE_ID $HOST
+nova aggregate-remove-host $AGGREGATE2_ID $FIRST_HOST
+nova aggregate-remove-host $AGGREGATE_ID $FIRST_HOST
 
 # Test aggregate-delete
 # =====================
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 7fe81ba..c967e39 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -49,6 +49,10 @@
 # Default user
 DEFAULT_INSTANCE_USER=${DEFAULT_INSTANCE_USER:-cirros}
 
+# Security group name
+SECGROUP=${SECGROUP:-boot_secgroup}
+
+
 # Launching servers
 # =================
 
@@ -72,7 +76,6 @@
 fi
 
 # Configure Security Groups
-SECGROUP=${SECGROUP:-test_secgroup}
 nova secgroup-delete $SECGROUP || true
 nova secgroup-create $SECGROUP "$SECGROUP description"
 nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
@@ -246,8 +249,8 @@
     die "Failure deleting instance $INSTANCE_NAME"
 
 # Wait for termination
-if ! timeout $ACTIVE_TIMEOUT sh -c "while nova show $INSTANCE_NAME; do sleep 1; done"; then
-    echo "server didn't terminate!"
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    echo "Server $NAME not deleted"
     exit 1
 fi
 
@@ -256,8 +259,7 @@
     die "Failure deleting floating IP $FLOATING_IP"
 
 # Delete a secgroup
-nova secgroup-delete $SECGROUP || \
-    die "Failure deleting security group $SECGROUP"
+nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index 9f7aed1..fb052dd 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -43,6 +43,9 @@
 # Boot this image, use first AMI-format image if unset
 DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
 
+# Security group name
+SECGROUP=${SECGROUP:-euca_secgroup}
+
 
 # Launching a server
 # ==================
@@ -50,9 +53,6 @@
 # Find a machine image to boot
 IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
 
-# Define secgroup
-SECGROUP=euca_secgroup
-
 # Add a secgroup
 if ! euca-describe-groups | grep -q $SECGROUP; then
     euca-add-group -d "$SECGROUP description" $SECGROUP
@@ -119,14 +119,13 @@
     die "Failure terminating instance $INSTANCE"
 
 # Assure it has terminated within a reasonable time
-if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q running; do sleep 1; done"; then
+if ! timeout $TERMINATE_TIMEOUT sh -c "while euca-describe-instances $INSTANCE | grep -q $INSTANCE; do sleep 1; done"; then
     echo "server didn't terminate within $TERMINATE_TIMEOUT seconds"
     exit 1
 fi
 
 # Delete group
-euca-delete-group $SECGROUP || \
-    die "Failure deleting security group $SECGROUP"
+euca-delete-group $SECGROUP || die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 02259c0..77f020e 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -200,12 +200,12 @@
 # Delete second floating IP
 nova floating-ip-delete $TEST_FLOATING_IP || die "Failure deleting floating IP $TEST_FLOATING_IP"
 
-# shutdown the server
+# Shutdown the server
 nova delete $VM_UUID || die "Failure deleting instance $NAME"
 
-# make sure the VM shuts down within a reasonable time
-if ! timeout $TERMINATE_TIMEOUT sh -c "while nova show $VM_UUID | grep status | grep -q ACTIVE; do sleep 1; done"; then
-    echo "server didn't shut down!"
+# Wait for termination
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    echo "Server $NAME not deleted"
     exit 1
 fi
 
diff --git a/exercises/quantum-adv-test.sh b/exercises/quantum-adv-test.sh
index cff29d2..8f15b63 100755
--- a/exercises/quantum-adv-test.sh
+++ b/exercises/quantum-adv-test.sh
@@ -76,14 +76,14 @@
 DEMO2_NUM_NET=2
 
 PUBLIC_NET1_CIDR="200.0.0.0/24"
-DEMO1_NET1_CIDR="190.0.0.0/24"
-DEMO2_NET1_CIDR="191.0.0.0/24"
-DEMO2_NET2_CIDR="191.0.1.0/24"
+DEMO1_NET1_CIDR="10.1.0.0/24"
+DEMO2_NET1_CIDR="10.2.0.0/24"
+DEMO2_NET2_CIDR="10.2.1.0/24"
 
 PUBLIC_NET1_GATEWAY="200.0.0.1"
-DEMO1_NET1_GATEWAY="190.0.0.1"
-DEMO2_NET1_GATEWAY="191.0.0.1"
-DEMO2_NET2_GATEWAY="191.0.1.1"
+DEMO1_NET1_GATEWAY="10.1.0.1"
+DEMO2_NET1_GATEWAY="10.2.0.1"
+DEMO2_NET2_GATEWAY="10.2.1.1"
 
 PUBLIC_NUM_VM=1
 DEMO1_NUM_VM=1
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 0f25355..5db10d3 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -43,6 +43,9 @@
 # Boot this image, use first AMi image if unset
 DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ami}
 
+# Security group name
+SECGROUP=${SECGROUP:-vol_secgroup}
+
 
 # Launching a server
 # ==================
@@ -62,6 +65,25 @@
 # Grab the id of the image to launch
 IMAGE=$(glance image-list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
 
+# Security Groups
+# ---------------
+
+# List of secgroups:
+nova secgroup-list
+
+# Create a secgroup
+if ! nova secgroup-list | grep -q $SECGROUP; then
+    nova secgroup-create $SECGROUP "$SECGROUP description"
+    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+        echo "Security group not created"
+        exit 1
+    fi
+fi
+
+# Configure Security Group Rules
+nova secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0
+nova secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0
+
 # determinine instance type
 # -------------------------
 
@@ -171,8 +193,17 @@
     exit 1
 fi
 
-# shutdown the server
-nova delete $NAME || die "Failure deleting instance $NAME"
+# Shutdown the server
+nova delete $VM_UUID || die "Failure deleting instance $NAME"
+
+# Wait for termination
+if ! timeout $TERMINATE_TIMEOUT sh -c "while nova list | grep -q $VM_UUID; do sleep 1; done"; then
+    echo "Server $NAME not deleted"
+    exit 1
+fi
+
+# Delete a secgroup
+nova secgroup-delete $SECGROUP || die "Failure deleting security group $SECGROUP"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/files/default_catalog.templates b/files/default_catalog.templates
index ceb6458..990cc0e 100644
--- a/files/default_catalog.templates
+++ b/files/default_catalog.templates
@@ -35,7 +35,7 @@
 catalog.RegionOne.image.internalURL = http://%SERVICE_HOST%:9292
 catalog.RegionOne.image.name = Image Service
 
-catalog.RegionOne.heat.publicURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.heat.adminURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.heat.internalURL = http://%SERVICE_HOST%:8000/v1
-catalog.RegionOne.heat.name = Heat Service
+catalog.RegionOne.orchestration.publicURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.orchestration.adminURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.orchestration.internalURL = http://%SERVICE_HOST%:8000/v1
+catalog.RegionOne.orchestration.name = Heat Service
diff --git a/functions b/functions
index 386af09..c109eae 100644
--- a/functions
+++ b/functions
@@ -1,7 +1,16 @@
-# -*- mode: Shell-script -*-
 # functions - Common functions used by DevStack components
 #
-# ENABLED_SERVICES is used by is_service_enabled()
+# The following variables are assumed to be defined by certain functions:
+# ``DISTRO``
+# ``ENABLED_SERVICES``
+# ``ERROR_ON_CLONE``
+# ``FILES``
+# ``GLANCE_HOSTPORT``
+# ``OFFLINE``
+# ``PIP_DOWNLOAD_CACHE``
+# ``RECLONE``
+# ``TRACK_DEPENDS``
+# ``http_proxy``, ``https_proxy``, ``no_proxy``
 
 
 # Save trace setting
@@ -9,9 +18,9 @@
 set +o xtrace
 
 
-# Exit 0 if address is in network or 1 if
-# address is not in network or netaddr library
-# is not installed.
+# Exit 0 if address is in network or 1 if address is not in
+# network or netaddr library is not installed.
+# address_in_net ip-address ip-range
 function address_in_net() {
     python -c "
 import netaddr
@@ -21,7 +30,8 @@
 }
 
 
-# apt-get wrapper to set arguments correctly
+# Wrapper for ``apt-get`` to set cache and proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
 # apt_get operation package [package ...]
 function apt_get() {
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
@@ -88,15 +98,16 @@
 
 
 # get_packages() collects a list of package names of any type from the
-# prerequisite files in ``files/{apts|pips}``.  The list is intended
-# to be passed to a package installer such as apt or pip.
+# prerequisite files in ``files/{apts|rpms}``.  The list is intended
+# to be passed to a package installer such as apt or yum.
 #
-# Only packages required for the services in ENABLED_SERVICES will be
+# Only packages required for the services in ``ENABLED_SERVICES`` will be
 # included.  Two bits of metadata are recognized in the prerequisite files:
 # - ``# NOPRIME`` defers installation to be performed later in stack.sh
 # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
 #   of the package to the distros listed.  The distro names are case insensitive.
 #
+# Uses globals ``DISTRO``, ``ENABLED_SERVICES``
 # get_packages dir
 function get_packages() {
     local package_dir=$1
@@ -241,6 +252,7 @@
 }
 
 # git update using reference as a branch.
+# git_update_branch ref
 function git_update_branch() {
 
     GIT_BRANCH=$1
@@ -254,6 +266,7 @@
 
 # git update using reference as a tag. Be careful editing source at that repo
 # as working copy will be in a detached mode
+# git_update_tag ref
 function git_update_tag() {
 
     GIT_TAG=$1
@@ -265,6 +278,16 @@
 }
 
 
+# git update using reference as a remote branch.
+# git_update_remote_branch ref
+function git_update_remote_branch() {
+
+    GIT_BRANCH=$1
+
+    git checkout -b $GIT_BRANCH -t origin/$GIT_BRANCH
+}
+
+
 # Translate the OS version values into common nomenclature
 # Sets ``DISTRO`` from the ``os_*`` values
 function GetDistro() {
@@ -289,6 +312,7 @@
 # Set global RECLONE=yes to simulate a clone when dest-dir exists
 # Set global ERROR_ON_CLONE=True to abort execution with an error if the git repo
 # does not exist (default is False, meaning the repo will be cloned).
+# Uses global ``OFFLINE``
 # git_clone remote dest-dir branch
 function git_clone {
     [[ "$OFFLINE" = "True" ]] && return
@@ -329,6 +353,8 @@
                 git_update_tag $GIT_REF
             elif [[ -n "`git show-ref refs/heads/$GIT_REF`" ]]; then
                 git_update_branch $GIT_REF
+            elif [[ -n "`git show-ref refs/remotes/origin/$GIT_REF`" ]]; then
+                git_update_remote_branch $GIT_REF
             else
                 echo $GIT_REF is neither branch nor tag
                 exit 1
@@ -394,16 +420,20 @@
 
 
 # is_service_enabled() checks if the service(s) specified as arguments are
-# enabled by the user in **ENABLED_SERVICES**.
+# enabled by the user in ``ENABLED_SERVICES``.
 #
-# If there are multiple services specified as arguments the test performs a
-# boolean OR or if any of the services specified on the command line
-# return true.
+# Multiple services specified as arguments are ``OR``'ed together; the test
+# is a short-circuit boolean, i.e it returns on the first match.
 #
-# There is a special cases for some 'catch-all' services::
+# There are special cases for some 'catch-all' services::
 #   **nova** returns true if any service enabled start with **n-**
+#   **cinder** returns true if any service enabled start with **c-**
+#   **ceilometer** returns true if any service enabled start with **ceilometer**
 #   **glance** returns true if any service enabled start with **g-**
 #   **quantum** returns true if any service enabled start with **q-**
+#
+# Uses global ``ENABLED_SERVICES``
+# is_service_enabled service [service ...]
 function is_service_enabled() {
     services=$@
     for service in ${services}; do
@@ -417,7 +447,9 @@
     return 1
 }
 
-# remove extra commas from the input string (ENABLED_SERVICES)
+
+# remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
+# _cleanup_service_list service-list
 function _cleanup_service_list () {
     echo "$1" | sed -e '
         s/,,/,/g;
@@ -426,15 +458,17 @@
     '
 }
 
+
 # enable_service() adds the services passed as argument to the
-# **ENABLED_SERVICES** list, if they are not already present.
+# ``ENABLED_SERVICES`` list, if they are not already present.
 #
 # For example:
-#
 #   enable_service n-vol
 #
 # This function does not know about the special cases
 # for nova, glance, and quantum built into is_service_enabled().
+# Uses global ``ENABLED_SERVICES``
+# enable_service service [service ...]
 function enable_service() {
     local tmpsvcs="${ENABLED_SERVICES}"
     for service in $@; do
@@ -446,15 +480,17 @@
     disable_negated_services
 }
 
+
 # disable_service() removes the services passed as argument to the
-# **ENABLED_SERVICES** list, if they are present.
+# ``ENABLED_SERVICES`` list, if they are present.
 #
 # For example:
-#
 #   disable_service n-vol
 #
 # This function does not know about the special cases
 # for nova, glance, and quantum built into is_service_enabled().
+# Uses global ``ENABLED_SERVICES``
+# disable_service service [service ...]
 function disable_service() {
     local tmpsvcs=",${ENABLED_SERVICES},"
     local service
@@ -466,17 +502,22 @@
     ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
 }
 
+
 # disable_all_services() removes all current services
-# from **ENABLED_SERVICES** to reset the configuration
+# from ``ENABLED_SERVICES`` to reset the configuration
 # before a minimal installation
+# Uses global ``ENABLED_SERVICES``
+# disable_all_services
 function disable_all_services() {
     ENABLED_SERVICES=""
 }
 
-# We are looking for services with a - at the beginning to force
-# excluding those services. For example if you want to install all the default
-# services but not nova-volume (n-vol) you can have this set in your localrc :
+
+# Remove all services starting with '-'.  For example, to install all default
+# services except nova-volume (n-vol) set in ``localrc``:
 # ENABLED_SERVICES+=",-n-vol"
+# Uses global ``ENABLED_SERVICES``
+# disable_negated_services
 function disable_negated_services() {
     local tmpsvcs="${ENABLED_SERVICES}"
     local service
@@ -488,6 +529,7 @@
     ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
 }
 
+
 # Distro-agnostic package installer
 # install_package package [package ...]
 function install_package() {
@@ -513,7 +555,8 @@
 }
 
 
-# pip install wrapper to set cache and proxy environment variables
+# Wrapper for ``pip install`` to set cache and proxy environment variables
+# Uses globals ``OFFLINE``, ``PIP_DOWNLOAD_CACHE``, ``TRACK_DEPENDS``, ``*_proxy``
 # pip_install package [package ...]
 function pip_install {
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
@@ -554,8 +597,55 @@
 }
 
 
-# pip install the dependencies of the package before we do the setup.py
-# develop, so that pip and not distutils process the dependency chain
+# Helper to launch a service in a named screen
+# screen_it service "command-line"
+function screen_it {
+    NL=`echo -ne '\015'`
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    if is_service_enabled $1; then
+        # Append the service to the screen rc file
+        screen_rc "$1" "$2"
+
+        screen -S $SCREEN_NAME -X screen -t $1
+        # sleep to allow bash to be ready to be send the command - we are
+        # creating a new window in screen and then sends characters, so if
+        # bash isn't running by the time we send the command, nothing happens
+        sleep 1.5
+
+        if [[ -n ${SCREEN_LOGDIR} ]]; then
+            screen -S $SCREEN_NAME -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
+            screen -S $SCREEN_NAME -p $1 -X log on
+            ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
+        fi
+        screen -S $SCREEN_NAME -p $1 -X stuff "$2$NL"
+    fi
+}
+
+
+# Screen rc file builder
+# screen_rc service "command-line"
+function screen_rc {
+    SCREEN_NAME=${SCREEN_NAME:-stack}
+    SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc
+    if [[ ! -e $SCREENRC ]]; then
+        # Name the screen session
+        echo "sessionname $SCREEN_NAME" > $SCREENRC
+        # Set a reasonable statusbar
+        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
+        echo "screen -t shell bash" >> $SCREENRC
+    fi
+    # If this service doesn't already exist in the screenrc file
+    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
+        NL=`echo -ne '\015'`
+        echo "screen -t $1 bash" >> $SCREENRC
+        echo "stuff \"$2$NL\"" >> $SCREENRC
+    fi
+}
+
+
+# ``pip install`` the dependencies of the package before ``setup.py develop``
+# so pip and not distutils processes the dependency chain
+# Uses globals ``TRACK_DEPENDS``, ``*_proxy``
 # setup_develop directory
 function setup_develop() {
     if [[ $TRACK_DEPENDS = True ]] ; then
@@ -606,7 +696,9 @@
 
 
 # Normalize config values to True or False
-# VAR=`trueorfalse default-value test-value`
+# Accepts as False: 0 no false False FALSE
+# Accepts as True: 1 yes true True TRUE
+# VAR=$(trueorfalse default-value test-value)
 function trueorfalse() {
     local default=$1
     local testval=$2
@@ -620,8 +712,8 @@
 
 # Retrieve an image from a URL and upload into Glance
 # Uses the following variables:
-#   **FILES** must be set to the cache dir
-#   **GLANCE_HOSTPORT**
+#   ``FILES`` must be set to the cache dir
+#   ``GLANCE_HOSTPORT``
 # upload_image image-url glance-token
 function upload_image() {
     local image_url=$1
@@ -717,7 +809,8 @@
 }
 
 
-# yum wrapper to set arguments correctly
+# Wrapper for ``yum`` to set proxy environment variables
+# Uses globals ``OFFLINE``, ``*_proxy``
 # yum_install package [package ...]
 function yum_install() {
     [[ "$OFFLINE" = "True" ]] && return
@@ -731,3 +824,8 @@
 
 # Restore xtrace
 $XTRACE
+
+
+# Local variables:
+# -*- mode: Shell-script -*-
+# End:
diff --git a/lib/cinder b/lib/cinder
index 1bad5c0..5f0b255 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -115,6 +115,8 @@
 
     if is_service_enabled qpid ; then
         iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
+    elif is_service_enabled zeromq; then
+        iniset $CINDER_CONF DEFAULT rpc_backend cinder.openstack.common.rpc.impl_zmq
     elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
         iniset $CINDER_CONF DEFAULT rabbit_host $RABBIT_HOST
         iniset $CINDER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD
diff --git a/lib/n-vol b/lib/n-vol
new file mode 100644
index 0000000..30be0cd
--- /dev/null
+++ b/lib/n-vol
@@ -0,0 +1,118 @@
+# lib/n-vol
+# Install and start Nova volume service
+
+# Dependencies:
+# - functions
+# - KEYSTONE_AUTH_* must be defined
+# - SERVICE_{TENANT_NAME|PASSWORD} must be defined
+
+# stack.sh
+# ---------
+# install_nvol
+# configure_nvol
+# init_nvol
+# start_nvol
+# stop_nvol
+# cleanup_nvol
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# Name of the LVM volume group to use/create for iscsi volumes
+VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
+VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
+
+
+# cleanup_nvol() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_nvol() {
+    # kill instances (nova)
+    # delete image files (glance)
+    # This function intentionally left blank
+    :
+}
+
+# configure_nvol() - Set config files, create data dirs, etc
+function configure_nvol() {
+    # sudo python setup.py deploy
+    # iniset $XXX_CONF ...
+    # This function intentionally left blank
+    :
+}
+
+# init_nvol() - Initialize databases, etc.
+function init_nvol() {
+    # Configure a default volume group called '`stack-volumes`' for the volume
+    # service if it does not yet exist.  If you don't wish to use a file backed
+    # volume group, create your own volume group called ``stack-volumes`` before
+    # invoking ``stack.sh``.
+    #
+    # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``.
+
+    if ! sudo vgs $VOLUME_GROUP; then
+        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
+        # Only create if the file doesn't already exists
+        [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
+        DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
+        # Only create if the loopback device doesn't contain $VOLUME_GROUP
+        if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
+    fi
+
+    mkdir -p $NOVA_DIR/volumes
+
+    if sudo vgs $VOLUME_GROUP; then
+        if [[ "$os_PACKAGE" = "rpm" ]]; then
+            # RPM doesn't start the service
+            start_service tgtd
+        fi
+
+        # Remove nova iscsi targets
+        sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
+        # Clean out existing volumes
+        for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
+            # ``VOLUME_NAME_PREFIX`` prefixes the LVs we want
+            if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
+                sudo lvremove -f $VOLUME_GROUP/$lv
+            fi
+        done
+    fi
+}
+
+# install_nvol() - Collect source and prepare
+function install_nvol() {
+    # git clone xxx
+    # Install is handled when installing Nova
+    :
+}
+
+# start_nvol() - Start running processes, including screen
+function start_nvol() {
+    # Setup the tgt configuration file
+    if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
+       sudo mkdir -p /etc/tgt/conf.d
+       echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
+    fi
+
+    if [[ "$os_PACKAGE" = "deb" ]]; then
+        # tgt in oneiric doesn't restart properly if tgtd isn't running
+        # do it in two steps
+        sudo stop tgt || true
+        sudo start tgt
+    else
+        restart_service tgtd
+    fi
+
+    screen_it n-vol "cd $NOVA_DIR && $NOVA_DIR/bin/nova-volume"
+}
+
+# stop_nvol() - Stop running processes (non-screen)
+function stop_nvol() {
+    # FIXME(dtroyer): stop only the n-vol screen window?
+
+    stop_service tgt
+}
diff --git a/lib/template b/lib/template
new file mode 100644
index 0000000..78b848d
--- /dev/null
+++ b/lib/template
@@ -0,0 +1,77 @@
+# lib/template
+# Functions to control the configuration and operation of the XXXX service
+# <do not include this template file in ``stack.sh``!>
+
+# Dependencies:
+# ``functions`` file
+# ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
+# <list other global vars that are assumed to be defined>
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_XXXX
+# configure_XXXX
+# init_XXXX
+# start_XXXX
+# stop_XXXX
+# cleanup_XXXX
+
+# Print the commands being run so that we can see the command that triggers
+# an error.  It is also useful for following along as the install occurs.
+set -o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+XXXX_DIR=$DEST/XXXX
+XXXX_CONF_DIR=/etc/XXXX
+
+
+# Entry Points
+# ------------
+
+# cleanup_XXXX() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_XXXX() {
+    # kill instances (nova)
+    # delete image files (glance)
+    # This function intentionally left blank
+    :
+}
+
+# configure_XXXX() - Set config files, create data dirs, etc
+function configure_XXXX() {
+    # sudo python setup.py deploy
+    # iniset $XXXX_CONF ...
+    # This function intentionally left blank
+    :
+}
+
+# init_XXXX() - Initialize databases, etc.
+function init_XXXX() {
+    # clean up from previous (possibly aborted) runs
+    # create required data files
+    :
+}
+
+# install_XXXX() - Collect source and prepare
+function install_XXXX() {
+    # git clone xxx
+    :
+}
+
+# start_XXXX() - Start running processes, including screen
+function start_XXXX() {
+    # screen_it XXXX "cd $XXXX_DIR && $XXXX_DIR/bin/XXXX-bin"
+    :
+}
+
+# stop_XXXX() - Stop running processes (non-screen)
+function stop_XXXX() {
+    # FIXME(dtroyer): stop only our screen screen window?
+    :
+}
diff --git a/samples/local.sh b/samples/local.sh
index 2c54b10..eb9bc24 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -7,9 +7,10 @@
 # work properly.
 
 # This is a collection of some of the things we have found to be useful to run
-# after stack.sh to tweak the OpenStack configuration that DevStack produces.
+# after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces.
 # These should be considered as samples and are unsupported DevStack code.
 
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
@@ -34,7 +35,7 @@
 
 # Add first keypair found in localhost:$HOME/.ssh
 for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
-    if [[ -f $i ]]; then
+    if [[ -r $i ]]; then
         nova keypair-add --pub_key=$i `hostname`
         break
     fi
@@ -55,8 +56,9 @@
 if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
     nova flavor-create $MI_NAME 6 128 0 1
 fi
+
+
 # Other Uses
 # ----------
 
-# Add tcp/22 to default security group
-
+# Add tcp/22 and icmp to default security group
diff --git a/samples/localrc b/samples/localrc
index 4fb093d..bcaa788 100644
--- a/samples/localrc
+++ b/samples/localrc
@@ -1,9 +1,10 @@
 # Sample ``localrc`` for user-configurable variables in ``stack.sh``
 
-# NOTE: Copy this file to the root ``devstack`` directory for it to work properly.
+# NOTE: Copy this file to the root ``devstack`` directory for it to
+# work properly.
 
-# ``localrc`` is a user-maintained setings file that is sourced at the end of
-# ``stackrc``. This gives it the ability to override any variables set in ``stackrc``.
+# ``localrc`` is a user-maintained settings file that is sourced from ``stackrc``.
+# This gives it the ability to override any variables set in ``stackrc``.
 # Also, most of the settings in ``stack.sh`` are written to only be set if no
 # value has already been set; this lets ``localrc`` effectively override the
 # default values.
@@ -21,40 +22,51 @@
 # there are a few minimal variables set:
 
 # If the ``*_PASSWORD`` variables are not set here you will be prompted to enter
-# values for them by ``stack.sh``.
+# values for them by ``stack.sh`` and they will be added to ``localrc``.
 ADMIN_PASSWORD=nomoresecrete
 MYSQL_PASSWORD=stackdb
 RABBIT_PASSWORD=stackqueue
 SERVICE_PASSWORD=$ADMIN_PASSWORD
 
-# HOST_IP should be set manually for best results.  It is auto-detected during the
-# first run of ``stack.sh`` but often is indeterminate on later runs due to the IP
-# being moved from an Ethernet interface to a bridge on the host. Setting it here
-# also makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``.
+# ``HOST_IP`` should be set manually for best results if the NIC configuration
+# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the
+# public interface.  It is auto-detected in ``stack.sh`` but often is indeterminate
+# on later runs due to the IP moving from an Ethernet interface to a bridge on
+# the host. Setting it here also makes it available for ``openrc`` to include
+# when setting ``OS_AUTH_URL``.
 # ``HOST_IP`` is not set by default.
-HOST_IP=w.x.y.z
+#HOST_IP=w.x.y.z
 
 
-# Set DevStack Install Directory
-# ------------------------------
+# Logging
+# -------
 
-# The DevStack install directory is set by the ``DEST`` variable. By setting it
-# early in ``localrc`` you can reference it in later variables. The default value
-# is ``/opt/stack``. It can be useful to set it even though it is not changed from
-# the default value.
-DEST=/opt/stack
+# By default ``stack.sh`` output only goes to the terminal where it runs.  It can
+# be configured to additionally log to a file by setting ``LOGFILE`` to the full
+# path of the destination log file.  A timestamp will be appended to the given name.
+LOGFILE=$DEST/logs/stack.sh.log
+
+# Old log files are automatically removed after 7 days to keep things neat.  Change
+# the number of days by setting ``LOGDAYS``.
+LOGDAYS=2
+
+# Nova logs will be colorized if ``SYSLOG`` is not set; turn this off by setting
+# ``LOG_COLOR`` false.
+#LOG_COLOR=False
 
 
 # Using milestone-proposed branches
 # ---------------------------------
 
 # Uncomment these to grab the milestone-proposed branches from the repos:
+#CINDER_BRANCH=milestone-proposed
 #GLANCE_BRANCH=milestone-proposed
 #HORIZON_BRANCH=milestone-proposed
 #KEYSTONE_BRANCH=milestone-proposed
 #KEYSTONECLIENT_BRANCH=milestone-proposed
 #NOVA_BRANCH=milestone-proposed
 #NOVACLIENT_BRANCH=milestone-proposed
+#QUANTUM_BRANCH=milestone-proposed
 #SWIFT_BRANCH=milestone-proposed
 
 
diff --git a/stack.sh b/stack.sh
index 5c27462..617acf2 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,8 +1,9 @@
 #!/usr/bin/env bash
 
 # ``stack.sh`` is an opinionated OpenStack developer installation.  It
-# installs and configures various combinations of **Glance**, **Horizon**,
-# **Keystone**, **Nova**, **Quantum**, **Heat** and **Swift**
+# installs and configures various combinations of **Ceilometer**, **Cinder**,
+# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Quantum**
+# and **Swift**
 
 # This script allows you to specify configuration options of what git
 # repositories to use, enabled services, network configuration and various
@@ -10,14 +11,14 @@
 # shared settings for common resources (mysql, rabbitmq) and build a multi-node
 # developer install.
 
-# To keep this script simple we assume you are running on an **Ubuntu 11.10
-# Oneiric** or **Ubuntu 12.04 Precise** machine.  It should work in a VM or
-# physical server.  Additionally we put the list of ``apt`` and ``pip``
-# dependencies and other configuration files in this repo.  So start by
-# grabbing this script and the dependencies.
+# To keep this script simple we assume you are running on a recent **Ubuntu**
+# (11.10 Oneiric or 12.04 Precise) or **Fedora** (F16 or F17) machine.  It
+# should work in a VM or physical server.  Additionally we put the list of
+# ``apt`` and ``rpm`` dependencies and other configuration files in this repo.
 
 # Learn more and get the most recent version at http://devstack.org
 
+
 # Keep track of the devstack directory
 TOP_DIR=$(cd $(dirname "$0") && pwd)
 
@@ -47,25 +48,31 @@
 #     MYSQL_USER=hellaroot
 #
 # We try to have sensible defaults, so you should be able to run ``./stack.sh``
-# in most cases.
+# in most cases.  ``localrc`` is not distributed with DevStack and will never
+# be overwritten by a DevStack update.
 #
 # DevStack distributes ``stackrc`` which contains locations for the OpenStack
 # repositories and branches to configure.  ``stackrc`` sources ``localrc`` to
-# allow you to safely override those settings without being overwritten
-# when updating DevStack.
+# allow you to safely override those settings.
+
 if [[ ! -r $TOP_DIR/stackrc ]]; then
     echo "ERROR: missing $TOP_DIR/stackrc - did you grab more than just stack.sh?"
     exit 1
 fi
 source $TOP_DIR/stackrc
 
-# HTTP and HTTPS proxy servers are supported via the usual environment variables
-# ``http_proxy`` and ``https_proxy``.  Additionally if you would like to access
-# to specific server directly and not through the proxy server, you can use
-# ``no_proxy`` environment variable.  They can be set in ``localrc`` if necessary
-# or on the command line::
+
+# Proxy Settings
+# --------------
+
+# HTTP and HTTPS proxy servers are supported via the usual environment variables [1]
+# ``http_proxy``, ``https_proxy`` and ``no_proxy``. They can be set in
+# ``localrc`` if necessary or on the command line::
+#
+# [1] http://www.w3.org/Daemon/User/Proxies/ProxyClients.html
 #
 #     http_proxy=http://proxy.example.com:3128/ no_proxy=repo.example.net ./stack.sh
+
 if [[ -n "$http_proxy" ]]; then
     export http_proxy=$http_proxy
 fi
@@ -98,6 +105,7 @@
     fi
 fi
 
+# Disallow qpid on oneiric
 if [ "${DISTRO}" = "oneiric" ] && is_service_enabled qpid ; then
     # Qpid was introduced in precise
     echo "You must use Ubuntu Precise or newer for Qpid support."
@@ -114,36 +122,56 @@
 # ``stack.sh`` keeps function libraries here
 # Make sure ``$TOP_DIR/lib`` directory is present
 if [ ! -d $TOP_DIR/lib ]; then
-    echo "ERROR: missing devstack/lib - did you grab more than just stack.sh?"
+    echo "ERROR: missing devstack/lib"
     exit 1
 fi
 
-# stack.sh keeps the list of ``apt`` and ``pip`` dependencies in external
-# files, along with config templates and other useful files.  You can find these
-# in the ``files`` directory (next to this script).  We will reference this
-# directory using the ``FILES`` variable in this script.
+# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config
+# templates and other useful files in the ``files`` subdirectory
 FILES=$TOP_DIR/files
 if [ ! -d $FILES ]; then
-    echo "ERROR: missing devstack/files - did you grab more than just stack.sh?"
+    echo "ERROR: missing devstack/files"
     exit 1
 fi
 
+SCREEN_NAME=${SCREEN_NAME:-stack}
 # Check to see if we are already running DevStack
-if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].stack"; then
+if type -p screen >/dev/null && screen -ls | egrep -q "[0-9].$SCREEN_NAME"; then
     echo "You are already running a stack.sh session."
     echo "To rejoin this session type 'screen -x stack'."
-    echo "To destroy this session, kill the running screen."
+    echo "To destroy this session, type './unstack.sh'."
     exit 1
 fi
 
+# Make sure we only have one rpc backend enabled.
+rpc_backend_cnt=0
+for svc in qpid zeromq rabbit; do
+    is_service_enabled $svc &&
+        ((rpc_backend_cnt++))
+done
+if [ "$rpc_backend_cnt" -gt 1 ]; then
+    echo "ERROR: only one rpc backend may be enabled,"
+    echo "       set only one of 'rabbit', 'qpid', 'zeromq'"
+    echo "       via ENABLED_SERVICES."
+elif [ "$rpc_backend_cnt" == 0 ]; then
+    echo "ERROR: at least one rpc backend must be enabled,"
+    echo "       set one of 'rabbit', 'qpid', 'zeromq'"
+    echo "       via ENABLED_SERVICES."
+fi
+unset rpc_backend_cnt
+
 # Make sure we only have one volume service enabled.
 if is_service_enabled cinder && is_service_enabled n-vol; then
     echo "ERROR: n-vol and cinder must not be enabled at the same time"
     exit 1
 fi
 
-# OpenStack is designed to be run as a regular user (Horizon will fail to run
-# as root, since apache refused to startup serve content from root user).  If
+
+# root Access
+# -----------
+
+# OpenStack is designed to be run as a non-root user; Horizon will fail to run
+# as **root** since Apache will not serve content from the **root** user.  If
 # ``stack.sh`` is run as **root**, it automatically creates a **stack** user with
 # sudo privileges and runs as that user.
 
@@ -153,8 +181,7 @@
     echo "In $ROOTSLEEP seconds, we will create a user 'stack' and run as that user"
     sleep $ROOTSLEEP
 
-    # since this script runs as a normal user, we need to give that user
-    # ability to run sudo
+    # Give the non-root user the ability to run as **root** via ``sudo``
     if [[ "$os_PACKAGE" = "deb" ]]; then
         dpkg -l sudo || apt_get update && install_package sudo
     else
@@ -170,7 +197,7 @@
     fi
 
     echo "Giving stack user passwordless sudo priviledges"
-    # some uec images sudoers does not have a '#includedir'. add one.
+    # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one
     grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
         echo "#includedir /etc/sudoers.d" >> /etc/sudoers
     ( umask 226 && echo "stack ALL=(ALL) NOPASSWD:ALL" \
@@ -187,7 +214,7 @@
     fi
     exit 1
 else
-    # We're not root, make sure sudo is available
+    # We're not **root**, make sure ``sudo`` is available
     if [[ "$os_PACKAGE" = "deb" ]]; then
         CHECK_SUDO_CMD="dpkg -l sudo"
     else
@@ -195,7 +222,7 @@
     fi
     $CHECK_SUDO_CMD || die "Sudo is required.  Re-run stack.sh as root ONE TIME ONLY to set up sudo."
 
-    # UEC images /etc/sudoers does not have a '#includedir'. add one.
+    # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one
     sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
         echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers
 
@@ -219,14 +246,14 @@
     sudo chown `whoami` $DEST
 fi
 
-# Set True to configure ``stack.sh`` to run cleanly without Internet access.
-# ``stack.sh`` must have been previously run with Internet access to install
-# prerequisites and initialize ``$DEST``.
+# Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without
+# Internet access. ``stack.sh`` must have been previously run with Internet
+# access to install prerequisites and fetch repositories.
 OFFLINE=`trueorfalse False $OFFLINE`
 
-# Set True to configure ``stack.sh`` to exit with an error code if it is asked
-# to clone any git repositories.  If devstack is used in a testing environment,
-# this may be used to ensure that the correct code is being tested.
+# Set ``ERROR_ON_CLONE`` to ``True`` to configure ``stack.sh`` to exit if
+# the destination git repository does not exist during the ``git_clone``
+# operation.
 ERROR_ON_CLONE=`trueorfalse False $ERROR_ON_CLONE`
 
 # Destination path for service data
@@ -235,15 +262,16 @@
 sudo chown `whoami` $DATA_DIR
 
 
-# Projects
-# --------
+# Configure Projects
+# ==================
 
 # Get project function libraries
 source $TOP_DIR/lib/cinder
+source $TOP_DIR/lib/n-vol
 source $TOP_DIR/lib/ceilometer
 source $TOP_DIR/lib/heat
 
-# Set the destination directories for openstack projects
+# Set the destination directories for OpenStack projects
 NOVA_DIR=$DEST/nova
 HORIZON_DIR=$DEST/horizon
 GLANCE_DIR=$DEST/glance
@@ -273,17 +301,19 @@
 # Use namespace or not
 Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
 
-# Name of the lvm volume group to use/create for iscsi volumes
+# Name of the LVM volume group to use/create for iscsi volumes
 VOLUME_GROUP=${VOLUME_GROUP:-stack-volumes}
 VOLUME_NAME_PREFIX=${VOLUME_NAME_PREFIX:-volume-}
 INSTANCE_NAME_PREFIX=${INSTANCE_NAME_PREFIX:-instance-}
 
-# Nova supports pluggable schedulers.  ``FilterScheduler`` should work in most
-# cases.
+# Nova supports pluggable schedulers.  The default ``FilterScheduler``
+# should work in most cases.
 SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 
 # Set fixed and floating range here so we can make sure not to use addresses
-# from either range when attempting to guess the ip to use for the host
+# from either range when attempting to guess the IP to use for the host.
+# Note that setting FIXED_RANGE may be necessary when running DevStack
+# in an OpenStack cloud that uses either of these address ranges internally.
 FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.224/28}
 
@@ -294,10 +324,12 @@
     HOST_IP=""
     HOST_IPS=`LC_ALL=C ip -f inet addr show ${HOST_IP_IFACE} | awk '/inet/ {split($2,parts,"/");  print parts[1]}'`
     for IP in $HOST_IPS; do
-        # Attempt to filter out ip addresses that are part of the fixed and
-        # floating range. Note that this method only works if the 'netaddr'
+        # Attempt to filter out IP addresses that are part of the fixed and
+        # floating range. Note that this method only works if the ``netaddr``
         # python library is installed. If it is not installed, an error
-        # will be printed and the first ip from the interface will be used.
+        # will be printed and the first IP from the interface will be used.
+        # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct
+        # address.
         if ! (address_in_net $IP $FIXED_RANGE || address_in_net $IP $FLOATING_RANGE); then
             HOST_IP=$IP
             break;
@@ -318,7 +350,7 @@
 SYSLOG_HOST=${SYSLOG_HOST:-$HOST_IP}
 SYSLOG_PORT=${SYSLOG_PORT:-516}
 
-# Use color for logging output
+# Use color for logging output (only available if syslog is not used)
 LOG_COLOR=`trueorfalse True $LOG_COLOR`
 
 # Service startup timeout
@@ -374,7 +406,7 @@
 
 if [ "$VIRT_DRIVER" = 'xenserver' ]; then
     PUBLIC_INTERFACE_DEFAULT=eth3
-    # allow build_domU.sh to specify the flat network bridge via kernel args
+    # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
     FLAT_NETWORK_BRIDGE_DEFAULT=$(grep -o 'flat_network_bridge=[[:alnum:]]*' /proc/cmdline | cut -d= -f 2 | sort -u)
     GUEST_INTERFACE_DEFAULT=eth1
 else
@@ -396,19 +428,19 @@
 TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
 TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
 
-# **MULTI_HOST** is a mode where each compute node runs its own network node.  This
+# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
 # allows network operations and routing for a VM to occur on the server that is
 # running the VM - removing a SPOF and bandwidth bottleneck.
 MULTI_HOST=`trueorfalse False $MULTI_HOST`
 
-# If you are using FlatDHCP on multiple hosts, set the ``FLAT_INTERFACE``
-# variable but make sure that the interface doesn't already have an
-# ip or you risk breaking things.
+# If you are using the FlatDHCP network mode on multiple hosts, set the
+# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
+# have an IP or you risk breaking things.
 #
 # **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
 # hiccup while the network is moved from the flat interface to the flat network
 # bridge.  This will happen when you launch your first instance.  Upon launch
-# you will lose all connectivity to the node, and the vm launch will probably
+# you will lose all connectivity to the node, and the VM launch will probably
 # fail.
 #
 # If you are running on a single node and don't need to access the VMs from
@@ -431,6 +463,7 @@
 #
 # With Quantum networking the NET_MAN variable is ignored.
 
+
 # MySQL & (RabbitMQ or Qpid)
 # --------------------------
 
@@ -446,7 +479,7 @@
 MYSQL_USER=${MYSQL_USER:-root}
 read_password MYSQL_PASSWORD "ENTER A PASSWORD TO USE FOR MYSQL."
 
-# NOTE: Don't specify /db in this string so we can use it for multiple services
+# NOTE: Don't specify ``/db`` in this string so we can use it for multiple services
 BASE_SQL_CONN=${BASE_SQL_CONN:-mysql://$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST}
 
 # Rabbit connection info
@@ -455,6 +488,10 @@
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
 fi
 
+
+# Glance
+# ------
+
 # Glance connection info.  Note the port must be specified.
 GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$SERVICE_HOST:9292}
 
@@ -464,19 +501,17 @@
 
 # TODO: add logging to different location.
 
-# By default the location of swift drives and objects is located inside
-# the swift source directory. SWIFT_DATA_DIR variable allow you to redefine
-# this.
+# Set ``SWIFT_DATA_DIR`` to the location of swift drives and objects.
+# Default is the common DevStack data directory.
 SWIFT_DATA_DIR=${SWIFT_DATA_DIR:-${DEST}/data/swift}
 
-# We are going to have the configuration files inside the source
-# directory, change SWIFT_CONFIG_DIR if you want to adjust that.
+# Set ``SWIFT_CONFIG_DIR`` to the location of the configuration files.
+# Default is ``/etc/swift``.
 SWIFT_CONFIG_DIR=${SWIFT_CONFIG_DIR:-/etc/swift}
 
 # DevStack will create a loop-back disk formatted as XFS to store the
-# swift data. By default the disk size is 1 gigabyte. The variable
-# SWIFT_LOOPBACK_DISK_SIZE specified in bytes allow you to change
-# that.
+# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in bytes.
+# Default is 1 gigabyte.
 SWIFT_LOOPBACK_DISK_SIZE=${SWIFT_LOOPBACK_DISK_SIZE:-1000000}
 
 # The ring uses a configurable number of bits from a path’s MD5 hash as
@@ -489,7 +524,7 @@
 # By default we define 9 for the partition count (which mean 512).
 SWIFT_PARTITION_POWER_SIZE=${SWIFT_PARTITION_POWER_SIZE:-9}
 
-# This variable allows you to configure how many replicas you want to be
+# Set ``SWIFT_REPLICAS`` to configure how many replicas are to be
 # configured for your Swift cluster.  By default the three replicas would need a
 # bit of IO and Memory on a VM you may want to lower that to 1 if you want to do
 # only some quick testing.
@@ -514,8 +549,8 @@
 # Keystone
 # --------
 
-# Service Token - Openstack components need to have an admin token
-# to validate user tokens.
+# The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database.  It is
+# just a string and is not a 'real' Keystone token.
 read_password SERVICE_TOKEN "ENTER A SERVICE_TOKEN TO USE FOR THE SERVICE ADMIN TOKEN."
 # Services authenticate to Identity with servicename/SERVICE_PASSWORD
 read_password SERVICE_PASSWORD "ENTER A SERVICE_PASSWORD TO USE FOR THE SERVICE AUTHENTICATION."
@@ -547,10 +582,10 @@
 # Log files
 # ---------
 
-# Set up logging for stack.sh
-# Set LOGFILE to turn on logging
-# We append '.xxxxxxxx' to the given name to maintain history
-# where xxxxxxxx is a representation of the date the file was created
+# Set up logging for ``stack.sh``
+# Set ``LOGFILE`` to turn on logging
+# Append '.xxxxxxxx' to the given name to maintain history
+# where 'xxxxxxxx' is a representation of the date the file was created
 if [[ -n "$LOGFILE" || -n "$SCREEN_LOGDIR" ]]; then
     LOGDAYS=${LOGDAYS:-7}
     TIMESTAMP_FORMAT=${TIMESTAMP_FORMAT:-"%F-%H%M%S"}
@@ -558,7 +593,7 @@
 fi
 
 if [[ -n "$LOGFILE" ]]; then
-    # First clean up old log files.  Use the user-specified LOGFILE
+    # First clean up old log files.  Use the user-specified ``LOGFILE``
     # as the template to search for, appending '.*' to match the date
     # we added on earlier runs.
     LOGDIR=$(dirname "$LOGFILE")
@@ -575,11 +610,11 @@
 fi
 
 # Set up logging of screen windows
-# Set SCREEN_LOGDIR to turn on logging of screen windows to the
-# directory specified in SCREEN_LOGDIR, we will log to the the file
-# screen-$SERVICE_NAME-$TIMESTAMP.log in that dir and have a link
-# screen-$SERVICE_NAME.log to the latest log file.
-# Logs are kept for as long specified in LOGDAYS.
+# Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the
+# directory specified in ``SCREEN_LOGDIR``, we will log to the file
+# ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link
+# ``screen-$SERVICE_NAME.log`` to the latest log file.
+# Logs are kept for as long as specified in ``LOGDAYS``.
 if [[ -n "$SCREEN_LOGDIR" ]]; then
 
     # We make sure the directory is created.
@@ -591,8 +626,11 @@
     fi
 fi
 
-# So that errors don't compound we exit on any errors so you see only the
-# first error that occurred.
+
+# Set Up Script Execution
+# -----------------------
+
+# Exit on any errors so that errors don't compound
 trap failed ERR
 failed() {
     local r=$?
@@ -609,7 +647,7 @@
 # Install Packages
 # ================
 
-# Openstack uses a fair number of other projects.
+# OpenStack uses a fair number of other projects.
 
 # Install package requirements
 if [[ "$os_PACKAGE" = "deb" ]]; then
@@ -636,6 +674,12 @@
     else
         install_package qpidd
     fi
+elif is_service_enabled zeromq; then
+    if [[ "$os_PACKAGE" = "rpm" ]]; then
+        install_package zeromq python-zmq
+    else
+        install_package libzmq1 python-zmq
+    fi
 fi
 
 if is_service_enabled mysql; then
@@ -650,7 +694,7 @@
 MYSQL_PRESEED
     fi
 
-    # while ``.my.cnf`` is not needed for openstack to function, it is useful
+    # while ``.my.cnf`` is not needed for OpenStack to function, it is useful
     # as it allows you to access the mysql databases via ``mysql nova`` instead
     # of having to specify the username/password each time.
     if [[ ! -e $HOME/.my.cnf ]]; then
@@ -702,8 +746,6 @@
 
 if is_service_enabled n-cpu; then
 
-    # Virtualization Configuration
-    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     if [[ "$os_PACKAGE" = "deb" ]]; then
         LIBVIRT_PKG_NAME=libvirt-bin
     else
@@ -746,7 +788,10 @@
 # Install python requirements
 pip_install $(get_packages $FILES/pips | sort -u)
 
-# Check out OpenStack sources
+
+# Check Out Source
+# ----------------
+
 git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH
 
 # Check out the client libs that are used most
@@ -799,6 +844,7 @@
     install_ceilometer
 fi
 
+
 # Initialization
 # ==============
 
@@ -822,6 +868,7 @@
 fi
 
 # Do this _after_ glance is installed to override the old binary
+# TODO(dtroyer): figure out when this is no longer necessary
 setup_develop $GLANCECLIENT_DIR
 
 setup_develop $NOVA_DIR
@@ -848,6 +895,7 @@
     exit 0
 fi
 
+
 # Syslog
 # ------
 
@@ -870,8 +918,8 @@
 fi
 
 
-# Rabbit or Qpid
-# --------------
+# Finalize queue installation
+# ---------------------------
 
 if is_service_enabled rabbit; then
     # Start rabbitmq-server
@@ -889,10 +937,9 @@
 # Mysql
 # -----
 
-
 if is_service_enabled mysql; then
 
-    #start mysql-server
+    # Start mysql-server
     if [[ "$os_PACKAGE" = "rpm" ]]; then
         # RPM doesn't start the service
         start_service mysqld
@@ -931,51 +978,11 @@
     SCREEN_HARDSTATUS='%{= .} %-Lw%{= .}%> %n%f %t*%{= .}%+Lw%< %-=%{g}(%{d}%H/%l%{g})'
 fi
 
-# Our screenrc file builder
-function screen_rc {
-    SCREENRC=$TOP_DIR/stack-screenrc
-    if [[ ! -e $SCREENRC ]]; then
-        # Name the screen session
-        echo "sessionname stack" > $SCREENRC
-        # Set a reasonable statusbar
-        echo "hardstatus alwayslastline '$SCREEN_HARDSTATUS'" >> $SCREENRC
-        echo "screen -t stack bash" >> $SCREENRC
-    fi
-    # If this service doesn't already exist in the screenrc file
-    if ! grep $1 $SCREENRC 2>&1 > /dev/null; then
-        NL=`echo -ne '\015'`
-        echo "screen -t $1 bash" >> $SCREENRC
-        echo "stuff \"$2$NL\"" >> $SCREENRC
-    fi
-}
-
-# Our screen helper to launch a service in a hidden named screen
-function screen_it {
-    NL=`echo -ne '\015'`
-    if is_service_enabled $1; then
-        # Append the service to the screen rc file
-        screen_rc "$1" "$2"
-
-        screen -S stack -X screen -t $1
-        # sleep to allow bash to be ready to be send the command - we are
-        # creating a new window in screen and then sends characters, so if
-        # bash isn't running by the time we send the command, nothing happens
-        sleep 1.5
-
-        if [[ -n ${SCREEN_LOGDIR} ]]; then
-            screen -S stack -p $1 -X logfile ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log
-            screen -S stack -p $1 -X log on
-            ln -sf ${SCREEN_LOGDIR}/screen-${1}.${CURRENT_LOG_TIME}.log ${SCREEN_LOGDIR}/screen-${1}.log
-        fi
-        screen -S stack -p $1 -X stuff "$2$NL"
-    fi
-}
-
 # Create a new named screen to run processes in
-screen -d -m -S stack -t stack -s /bin/bash
+screen -d -m -S $SCREEN_NAME -t shell -s /bin/bash
 sleep 1
 # Set a reasonable statusbar
-screen -r stack -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
+screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
 
 
 # Horizon
@@ -1015,7 +1022,8 @@
         APACHE_CONF=conf.d/horizon.conf
         sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
     fi
-    ## Configure apache to run horizon
+
+    # Configure apache to run horizon
     sudo sh -c "sed -e \"
         s,%USER%,$APACHE_USER,g;
         s,%GROUP%,$APACHE_GROUP,g;
@@ -1023,6 +1031,7 @@
         s,%APACHE_NAME%,$APACHE_NAME,g;
         s,%DEST%,$DEST,g;
     \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF"
+
     restart_service $APACHE_NAME
 fi
 
@@ -1036,13 +1045,17 @@
         sudo mkdir -p $GLANCE_CONF_DIR
     fi
     sudo chown `whoami` $GLANCE_CONF_DIR
+
     GLANCE_IMAGE_DIR=$DEST/glance/images
     # Delete existing images
     rm -rf $GLANCE_IMAGE_DIR
-
-    # Use local glance directories
     mkdir -p $GLANCE_IMAGE_DIR
 
+    GLANCE_CACHE_DIR=$DEST/glance/cache
+    # Delete existing images
+    rm -rf $GLANCE_CACHE_DIR
+    mkdir -p $GLANCE_CACHE_DIR
+
     # (re)create glance database
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS glance;'
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE glance CHARACTER SET utf8;'
@@ -1055,16 +1068,13 @@
     iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
-
-    GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
-    cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_user glance
-    iniset $GLANCE_REGISTRY_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_user glance
+    iniset $GLANCE_REGISTRY_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
 
     GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf
     cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
@@ -1073,7 +1083,15 @@
     iniset $GLANCE_API_CONF DEFAULT sql_connection $BASE_SQL_CONN/glance?charset=utf8
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
-    iniset $GLANCE_API_CONF paste_deploy flavor keystone
+    iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+    iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
+    iniset $GLANCE_API_CONF keystone_authtoken auth_host $KEYSTONE_AUTH_HOST
+    iniset $GLANCE_API_CONF keystone_authtoken auth_port $KEYSTONE_AUTH_PORT
+    iniset $GLANCE_API_CONF keystone_authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
+    iniset $GLANCE_API_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
+    iniset $GLANCE_API_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
+    iniset $GLANCE_API_CONF keystone_authtoken admin_user glance
+    iniset $GLANCE_API_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
 
     # Store the images in swift if enabled.
     if is_service_enabled swift; then
@@ -1084,15 +1102,28 @@
         iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True
     fi
 
+    GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
+    cp $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI
+
     GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
     cp $GLANCE_DIR/etc/glance-api-paste.ini $GLANCE_API_PASTE_INI
-    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_host $KEYSTONE_AUTH_HOST
-    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_port $KEYSTONE_AUTH_PORT
-    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_protocol $KEYSTONE_AUTH_PROTOCOL
-    iniset $GLANCE_API_PASTE_INI filter:authtoken auth_uri $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/
-    iniset $GLANCE_API_PASTE_INI filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $GLANCE_API_PASTE_INI filter:authtoken admin_user glance
-    iniset $GLANCE_API_PASTE_INI filter:authtoken admin_password $SERVICE_PASSWORD
+
+    GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
+    cp $GLANCE_DIR/etc/glance-cache.conf $GLANCE_CACHE_CONF
+    iniset $GLANCE_CACHE_CONF DEFAULT debug True
+    inicomment $GLANCE_CACHE_CONF DEFAULT log_file
+    iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
+    iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/
+    iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url
+    iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
+    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name
+    iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_TENANT_NAME
+    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user
+    iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
+    iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password
+    iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD
+
 
     GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
     cp $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
@@ -1106,7 +1137,7 @@
 # -------
 
 if is_service_enabled quantum; then
-    # Put config files in /etc/quantum for everyone to find
+    # Put config files in ``/etc/quantum`` for everyone to find
     if [[ ! -d /etc/quantum ]]; then
         sudo mkdir -p /etc/quantum
     fi
@@ -1127,7 +1158,7 @@
         exit 1
     fi
 
-    # If needed, move config file from $QUANTUM_DIR/etc/quantum to /etc/quantum
+    # If needed, move config file from ``$QUANTUM_DIR/etc/quantum`` to ``/etc/quantum``
     mkdir -p /$Q_PLUGIN_CONF_PATH
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     cp $QUANTUM_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
@@ -1135,14 +1166,19 @@
     sudo sed -i -e "s/^sql_connection =.*$/sql_connection = mysql:\/\/$MYSQL_USER:$MYSQL_PASSWORD@$MYSQL_HOST\/$Q_DB_NAME?charset=utf8/g" /$Q_PLUGIN_CONF_FILE
 
     OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-True}
-    if [[ "$Q_PLUGIN" = "openvswitch" && $OVS_ENABLE_TUNNELING = "True" ]]; then
+    if [[ "$Q_PLUGIN" = "openvswitch" && "$OVS_ENABLE_TUNNELING" = "True" ]]; then
         OVS_VERSION=`ovs-vsctl --version | head -n 1 | awk '{print $4;}'`
         if [ $OVS_VERSION \< "1.4" ] && ! is_service_enabled q-svc ; then
             echo "You are running OVS version $OVS_VERSION."
             echo "OVS 1.4+ is required for tunneling between multiple hosts."
             exit 1
         fi
-        sudo sed -i -e "s/.*enable_tunneling = .*$/enable_tunneling = $OVS_ENABLE_TUNNELING/g" /$Q_PLUGIN_CONF_FILE
+        if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges ""
+        else
+            iniset /$Q_PLUGIN_CONF_FILE OVS network_vlan_ranges default
+        fi
+        iniset /$Q_PLUGIN_CONF_FILE OVS tunnel_id_ranges 1:1000
     fi
 
     Q_CONF_FILE=/etc/quantum/quantum.conf
@@ -1189,7 +1225,19 @@
         sudo ovs-vsctl --no-wait -- --if-exists del-br $OVS_BRIDGE
         sudo ovs-vsctl --no-wait add-br $OVS_BRIDGE
         sudo ovs-vsctl --no-wait br-set-external-id $OVS_BRIDGE bridge-id br-int
-        sudo sed -i -e "s/.*local_ip = .*/local_ip = $HOST_IP/g" /$Q_PLUGIN_CONF_FILE
+        if [[ "$OVS_ENABLE_TUNNELING" == "True" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS local_ip $HOST_IP
+        else
+            # Need bridge if not tunneling
+            OVS_DEFAULT_BRIDGE=${OVS_DEFAULT_BRIDGE:-br-$GUEST_INTERFACE_DEFAULT}
+        fi
+        if [[ "$OVS_DEFAULT_BRIDGE" = "" ]]; then
+            iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings ""
+        else
+            # Configure bridge manually with physical interface as port for multi-node
+            sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_DEFAULT_BRIDGE
+            iniset /$Q_PLUGIN_CONF_FILE OVS bridge_mappings default:$OVS_DEFAULT_BRIDGE
+        fi
         AGENT_BINARY="$QUANTUM_DIR/quantum/plugins/openvswitch/agent/ovs_quantum_agent.py"
     elif [[ "$Q_PLUGIN" = "linuxbridge" ]]; then
        # Start up the quantum <-> linuxbridge agent
@@ -1233,6 +1281,8 @@
     iniset $Q_CONF_FILE DEFAULT control_exchange quantum
     if is_service_enabled qpid ; then
         iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_qpid
+    elif is_service_enabled zeromq; then
+        iniset $Q_CONF_FILE DEFAULT rpc_backend quantum.openstack.common.rpc.impl_zmq
     elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
         iniset $Q_CONF_FILE DEFAULT rabbit_host $RABBIT_HOST
         iniset $Q_CONF_FILE DEFAULT rabbit_password $RABBIT_PASSWORD
@@ -1248,10 +1298,11 @@
 # Start up the quantum agent
 screen_it q-dhcp "sudo python $AGENT_DHCP_BINARY --config-file $Q_CONF_FILE --config-file=$Q_DHCP_CONF_FILE"
 
+
 # Nova
 # ----
 
-# Put config files in /etc/nova for everyone to find
+# Put config files in ``/etc/nova`` for everyone to find
 NOVA_CONF_DIR=/etc/nova
 if [[ ! -d $NOVA_CONF_DIR ]]; then
     sudo mkdir -p $NOVA_CONF_DIR
@@ -1261,7 +1312,7 @@
 cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR
 
 # If Nova ships the new rootwrap filters files, deploy them
-# (owned by root) and add a parameter to $NOVA_ROOTWRAP
+# (owned by root) and add a parameter to ``$NOVA_ROOTWRAP``
 ROOTWRAP_SUDOER_CMD="$NOVA_ROOTWRAP"
 if [[ -d $NOVA_DIR/etc/nova/rootwrap.d ]]; then
     # Wipe any existing rootwrap.d files first
@@ -1334,7 +1385,7 @@
     # Force IP forwarding on, just on case
     sudo sysctl -w net.ipv4.ip_forward=1
 
-    # attempt to load modules: network block device - used to manage qcow images
+    # Attempt to load modules: network block device - used to manage qcow images
     sudo modprobe nbd || true
 
     # Check for kvm (hardware based virtualization).  If unable to initialize
@@ -1402,9 +1453,11 @@
 EOF'
         LIBVIRT_DAEMON=libvirtd
     fi
-    # The user that nova runs as needs to be member of libvirtd group otherwise
+
+    # The user that nova runs as needs to be member of **libvirtd** group otherwise
     # nova-compute will be unable to use libvirt.
     sudo usermod -a -G libvirtd `whoami`
+
     # libvirt detects various settings on startup, as we potentially changed
     # the system configuration (modules, filesystems), we need to restart
     # libvirt to detect those changes.
@@ -1462,17 +1515,17 @@
 
 if is_service_enabled swift; then
 
-    # We make sure to kill all swift processes first
+    # Make sure to kill all swift processes first
     swift-init all stop || true
 
-    # We first do a bit of setup by creating the directories and
+    # First do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
     USER_GROUP=$(id -g)
     sudo mkdir -p ${SWIFT_DATA_DIR}/drives
     sudo chown -R $USER:${USER_GROUP} ${SWIFT_DATA_DIR}
 
-    # We then create a loopback disk and format it to XFS.
+    # Create a loopback disk and format it to XFS.
     if [[ -e ${SWIFT_DATA_DIR}/drives/images/swift.img ]]; then
         if egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
             sudo umount ${SWIFT_DATA_DIR}/drives/sdb1
@@ -1485,24 +1538,22 @@
         dd if=/dev/zero of=${SWIFT_DATA_DIR}/drives/images/swift.img \
             bs=1024 count=0 seek=${SWIFT_LOOPBACK_DISK_SIZE}
     fi
+
     # Make a fresh XFS filesystem
     mkfs.xfs -f -i size=1024  ${SWIFT_DATA_DIR}/drives/images/swift.img
 
-    # After the drive being created we mount the disk with a few mount
-    # options to make it most efficient as possible for swift.
+    # Mount the disk with mount options to make it as efficient as possible
     mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
     if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
         sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
             ${SWIFT_DATA_DIR}/drives/images/swift.img ${SWIFT_DATA_DIR}/drives/sdb1
     fi
 
-    # We then create link to that mounted location so swift would know
-    # where to go.
+    # Create a link to the above mount
     for x in $(seq ${SWIFT_REPLICAS}); do
         sudo ln -sf ${SWIFT_DATA_DIR}/drives/sdb1/$x ${SWIFT_DATA_DIR}/$x; done
 
-    # We now have to emulate a few different servers into one we
-    # create all the directories needed for swift
+    # Create all of the directories needed to emulate a few different servers
     for x in $(seq ${SWIFT_REPLICAS}); do
             drive=${SWIFT_DATA_DIR}/drives/sdb1/${x}
             node=${SWIFT_DATA_DIR}/${x}/node
@@ -1518,7 +1569,7 @@
    sudo chown -R $USER: ${SWIFT_CONFIG_DIR} /var/run/swift
 
     if [[ "$SWIFT_CONFIG_DIR" != "/etc/swift" ]]; then
-        # Some swift tools are hard-coded to use /etc/swift and are apparenty not going to be fixed.
+        # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed.
         # Create a symlink if the config dir is moved
         sudo ln -sf ${SWIFT_CONFIG_DIR} /etc/swift
     fi
@@ -1609,9 +1660,8 @@
     cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONFIG_DIR}/swift.conf
     iniset ${SWIFT_CONFIG_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
 
-    # We need to generate a object/account/proxy configuration
-    # emulating 4 nodes on different ports we have a little function
-    # that help us doing that.
+    # This function generates an object/account/proxy configuration
+    # emulating 4 nodes on different ports
     function generate_swift_configuration() {
         local server_type=$1
         local bind_port=$2
@@ -1654,8 +1704,8 @@
     generate_swift_configuration container 6011 2
     generate_swift_configuration account 6012 2
 
-    # We have some specific configuration for swift for rsyslog. See
-    # the file /etc/rsyslog.d/10-swift.conf for more info.
+    # Specific configuration for swift for rsyslog. See
+    # ``/etc/rsyslog.d/10-swift.conf`` for more info.
     swift_log_dir=${SWIFT_DATA_DIR}/logs
     rm -rf ${swift_log_dir}
     mkdir -p ${swift_log_dir}/hourly
@@ -1696,7 +1746,7 @@
 
     } && popd >/dev/null
 
-   # We then can start rsync.
+   # Start rsync
     if [[ "$os_PACKAGE" = "deb" ]]; then
         sudo /etc/init.d/rsync restart || :
     else
@@ -1720,57 +1770,7 @@
 if is_service_enabled cinder; then
     init_cinder
 elif is_service_enabled n-vol; then
-    # Configure a default volume group called '`stack-volumes`' for the volume
-    # service if it does not yet exist.  If you don't wish to use a file backed
-    # volume group, create your own volume group called ``stack-volumes`` before
-    # invoking ``stack.sh``.
-    #
-    # By default, the backing file is 5G in size, and is stored in ``/opt/stack/data``.
-
-    if ! sudo vgs $VOLUME_GROUP; then
-        VOLUME_BACKING_FILE=${VOLUME_BACKING_FILE:-$DATA_DIR/${VOLUME_GROUP}-backing-file}
-        # Only create if the file doesn't already exists
-        [[ -f $VOLUME_BACKING_FILE ]] || truncate -s $VOLUME_BACKING_FILE_SIZE $VOLUME_BACKING_FILE
-        DEV=`sudo losetup -f --show $VOLUME_BACKING_FILE`
-        # Only create if the loopback device doesn't contain $VOLUME_GROUP
-        if ! sudo vgs $VOLUME_GROUP; then sudo vgcreate $VOLUME_GROUP $DEV; fi
-    fi
-
-    if sudo vgs $VOLUME_GROUP; then
-        if [[ "$os_PACKAGE" = "rpm" ]]; then
-            # RPM doesn't start the service
-            start_service tgtd
-        fi
-
-        # Setup tgtd configuration files
-        mkdir -p $NOVA_DIR/volumes
-
-        # Remove nova iscsi targets
-        sudo tgtadm --op show --mode target | grep $VOLUME_NAME_PREFIX | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true
-        # Clean out existing volumes
-        for lv in `sudo lvs --noheadings -o lv_name $VOLUME_GROUP`; do
-            # VOLUME_NAME_PREFIX prefixes the LVs we want
-            if [[ "${lv#$VOLUME_NAME_PREFIX}" != "$lv" ]]; then
-                sudo lvremove -f $VOLUME_GROUP/$lv
-            fi
-        done
-    fi
-
-    if [[ "$os_PACKAGE" = "deb" ]]; then
-
-        # Setup the tgt configuration file
-        if [[ ! -f /etc/tgt/conf.d/nova.conf ]]; then
-           sudo mkdir -p /etc/tgt/conf.d
-           echo "include $NOVA_DIR/volumes/*" | sudo tee /etc/tgt/conf.d/nova.conf
-        fi
-
-        # tgt in oneiric doesn't restart properly if tgtd isn't running
-        # do it in two steps
-        sudo stop tgt || true
-        sudo start tgt
-    else
-        restart_service tgtd
-    fi
+    init_nvol
 fi
 
 # Support entry points installation of console scripts
@@ -1785,10 +1785,10 @@
     echo "$1" >> $NOVA_CONF_DIR/$NOVA_CONF
 }
 
-# Remove legacy nova.conf
+# Remove legacy ``nova.conf``
 rm -f $NOVA_DIR/bin/nova.conf
 
-# (re)create nova.conf
+# (Re)create ``nova.conf``
 rm -f $NOVA_CONF_DIR/$NOVA_CONF
 add_nova_opt "[DEFAULT]"
 add_nova_opt "verbose=True"
@@ -1861,7 +1861,9 @@
 add_nova_opt "api_paste_config=$NOVA_CONF_DIR/api-paste.ini"
 add_nova_opt "image_service=nova.image.glance.GlanceImageService"
 add_nova_opt "ec2_dmz_host=$EC2_DMZ_HOST"
-if is_service_enabled qpid ; then
+if is_service_enabled zeromq; then
+    add_nova_opt "rpc_backend=nova.openstack.common.rpc.impl_zmq"
+elif is_service_enabled qpid; then
     add_nova_opt "rpc_backend=nova.rpc.impl_qpid"
 elif [ -n "$RABBIT_HOST" ] &&  [ -n "$RABBIT_PASSWORD" ]; then
     add_nova_opt "rabbit_host=$RABBIT_HOST"
@@ -1898,13 +1900,13 @@
     add_nova_opt "volume_api_class=nova.volume.cinder.API"
 fi
 
-# Provide some transition from EXTRA_FLAGS to EXTRA_OPTS
+# Provide some transition from ``EXTRA_FLAGS`` to ``EXTRA_OPTS``
 if [[ -z "$EXTRA_OPTS" && -n "$EXTRA_FLAGS" ]]; then
     EXTRA_OPTS=$EXTRA_FLAGS
 fi
 
-# You can define extra nova conf flags by defining the array EXTRA_OPTS,
-# For Example: EXTRA_OPTS=(foo=true bar=2)
+# Define extra nova conf flags by defining the array ``EXTRA_OPTS``.
+# For Example: ``EXTRA_OPTS=(foo=true bar=2)``
 for I in "${EXTRA_OPTS[@]}"; do
     # Attempt to convert flags to options
     add_nova_opt ${I//--}
@@ -1941,42 +1943,46 @@
 
 
 # Nova Database
-# ~~~~~~~~~~~~~
+# -------------
 
 # All nova components talk to a central database.  We will need to do this step
 # only once for an entire cluster.
 
 if is_service_enabled mysql && is_service_enabled nova; then
-    # (re)create nova database
+    # (Re)create nova database
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS nova;'
+
     # Explicitly use latin1: to avoid lp#829209, nova expects the database to
     # use latin1 by default, and then upgrades the database to utf8 (see the
     # 082_essex.py in nova)
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE nova CHARACTER SET latin1;'
 
-    # (re)create nova database
+    # (Re)create nova database
     $NOVA_BIN_DIR/nova-manage db sync
 fi
 
+
 # Heat
-# ------
+# ----
+
 if is_service_enabled heat; then
     init_heat
 fi
 
+
 # Launch Services
 # ===============
 
-# nova api crashes if we start it with a regular screen command,
+# Nova api crashes if we start it with a regular screen command,
 # so send the start command by forcing text into the window.
 # Only run the services specified in ``ENABLED_SERVICES``
 
-# launch the glance registry service
+# Launch the glance registry service
 if is_service_enabled g-reg; then
     screen_it g-reg "cd $GLANCE_DIR; bin/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
 fi
 
-# launch the glance api and wait for it to answer before continuing
+# Launch the glance api and wait for it to answer before continuing
 if is_service_enabled g-api; then
     screen_it g-api "cd $GLANCE_DIR; bin/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
     echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..."
@@ -1987,7 +1993,7 @@
 fi
 
 if is_service_enabled key; then
-    # (re)create keystone database
+    # (Re)create keystone database
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'DROP DATABASE IF EXISTS keystone;'
     mysql -u$MYSQL_USER -p$MYSQL_PASSWORD -e 'CREATE DATABASE keystone CHARACTER SET utf8;'
 
@@ -2005,7 +2011,7 @@
         cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR
     fi
 
-    # Rewrite stock keystone.conf:
+    # Rewrite stock ``keystone.conf``
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
     iniset $KEYSTONE_CONF sql connection "$BASE_SQL_CONN/keystone?charset=utf8"
     iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2"
@@ -2016,12 +2022,13 @@
     iniset $KEYSTONE_CONF filter:s3_extension paste.filter_factory "keystone.contrib.s3:S3Extension.factory"
 
     if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-        # Configure keystone.conf to use sql
+        # Configure ``keystone.conf`` to use sql
         iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog
         inicomment $KEYSTONE_CONF catalog template_file
     else
         KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates
         cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG
+
         # Add swift endpoints to service catalog if swift is enabled
         if is_service_enabled swift; then
             echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
@@ -2043,7 +2050,7 @@
             s,%S3_SERVICE_PORT%,$S3_SERVICE_PORT,g;
         " -i $KEYSTONE_CATALOG
 
-        # Configure keystone.conf to use templates
+        # Configure ``keystone.conf`` to use templates
         iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.TemplatedCatalog"
         iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG"
     fi
@@ -2060,10 +2067,11 @@
 
     # Initialize keystone database
     $KEYSTONE_DIR/bin/keystone-manage db_sync
-    # set up certificates
+
+    # Set up certificates
     $KEYSTONE_DIR/bin/keystone-manage pki_setup
 
-    # launch keystone and wait for it to answer before continuing
+    # Launch keystone and wait for it to answer before continuing
     screen_it key "cd $KEYSTONE_DIR && $KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF $KEYSTONE_LOG_CONFIG -d --debug"
     echo "Waiting for keystone to start..."
     if ! timeout $SERVICE_TIMEOUT sh -c "while ! http_proxy= curl -s $KEYSTONE_AUTH_PROTOCOL://$SERVICE_HOST:$KEYSTONE_API_PORT/v2.0/ >/dev/null; do sleep 1; done"; then
@@ -2071,13 +2079,13 @@
       exit 1
     fi
 
-    # keystone_data.sh creates services, admin and demo users, and roles.
+    # ``keystone_data.sh`` creates services, admin and demo users, and roles.
     SERVICE_ENDPOINT=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v2.0
 
     ADMIN_PASSWORD=$ADMIN_PASSWORD SERVICE_TENANT_NAME=$SERVICE_TENANT_NAME SERVICE_PASSWORD=$SERVICE_PASSWORD \
     SERVICE_TOKEN=$SERVICE_TOKEN SERVICE_ENDPOINT=$SERVICE_ENDPOINT SERVICE_HOST=$SERVICE_HOST \
     S3_SERVICE_PORT=$S3_SERVICE_PORT KEYSTONE_CATALOG_BACKEND=$KEYSTONE_CATALOG_BACKEND \
-    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES \
+    DEVSTACK_DIR=$TOP_DIR ENABLED_SERVICES=$ENABLED_SERVICES HEAT_API_PORT=$HEAT_API_PORT \
         bash -x $FILES/keystone_data.sh
 
     # Set up auth creds now that keystone is bootstrapped
@@ -2099,6 +2107,8 @@
     fi
 fi
 
+screen_it zeromq "cd $NOVA_DIR && $NOVA_DIR/bin/nova-rpc-zmq-receiver"
+
 # Launch the nova-api and wait for it to answer before continuing
 if is_service_enabled n-api; then
     add_nova_opt "enabled_apis=$NOVA_ENABLED_APIS"
@@ -2117,7 +2127,7 @@
 
     # Create a small network
     # Since quantum command is executed in admin context at this point,
-    # --tenant_id needs to be specified.
+    # ``--tenant_id`` needs to be specified.
     NET_ID=$(quantum net-create --tenant_id $TENANT_ID net1 | grep ' id ' | get_field 2)
     quantum subnet-create --tenant_id $TENANT_ID --ip_version 4 --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE
 elif is_service_enabled mysql && is_service_enabled nova; then
@@ -2131,20 +2141,19 @@
     $NOVA_BIN_DIR/nova-manage floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
-# Launching nova-compute should be as simple as running ``nova-compute`` but
-# have to do a little more than that in our script.  Since we add the group
-# ``libvirtd`` to our user in this script, when nova-compute is run it is
-# within the context of our original shell (so our groups won't be updated).
-# Use 'sg' to execute nova-compute as a member of the libvirtd group.
-# We don't check for is_service_enable as screen_it does it for us
+# The group **libvirtd** is added to the current user in this script.
+# Use 'sg' to execute nova-compute as a member of the **libvirtd** group.
+# ``screen_it`` checks ``is_service_enabled``, so no explicit check is needed here
 screen_it n-cpu "cd $NOVA_DIR && sg libvirtd $NOVA_BIN_DIR/nova-compute"
 screen_it n-crt "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cert"
-screen_it n-vol "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-volume"
 screen_it n-net "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-network"
 screen_it n-sch "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-scheduler"
 screen_it n-novnc "cd $NOVNC_DIR && ./utils/nova-novncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF --web ."
 screen_it n-xvnc "cd $NOVA_DIR && ./bin/nova-xvpvncproxy --config-file $NOVA_CONF_DIR/$NOVA_CONF"
 screen_it n-cauth "cd $NOVA_DIR && ./bin/nova-consoleauth"
+if is_service_enabled n-vol; then
+    start_nvol
+fi
 if is_service_enabled cinder; then
     start_cinder
 fi
@@ -2165,18 +2174,17 @@
     start_heat
 fi
 
+
 # Install Images
 # ==============
 
 # Upload an image to glance.
 #
-# The default image is cirros, a small testing image, which lets you login as root
-#
+# The default image is cirros, a small testing image which lets you log in as **root**
 # cirros also uses ``cloud-init``, supporting login via keypair and sending scripts as
 # userdata.  See https://help.ubuntu.com/community/CloudInit for more on cloud-init
 #
-# Override ``IMAGE_URLS`` with a comma-separated list of uec images.
-#
+# Override ``IMAGE_URLS`` with a comma-separated list of UEC images.
 #  * **oneiric**: http://uec-images.ubuntu.com/oneiric/current/oneiric-server-cloudimg-amd64.tar.gz
 #  * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz
 
@@ -2211,7 +2219,7 @@
 
 
 # Using the cloud
-# ===============
+# ---------------
 
 echo ""
 echo ""
@@ -2231,7 +2239,7 @@
     echo "The password: $ADMIN_PASSWORD"
 fi
 
-# Echo HOST_IP - useful for build_uec.sh, which uses dhcp to give the instance an address
+# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address
 echo "This is your host ip: $HOST_IP"
 
 # Warn that ``EXTRA_FLAGS`` needs to be converted to ``EXTRA_OPTS``
@@ -2239,5 +2247,5 @@
     echo "WARNING: EXTRA_FLAGS is defined and may need to be converted to EXTRA_OPTS"
 fi
 
-# Indicate how long this took to run (bash maintained variable 'SECONDS')
+# Indicate how long this took to run (bash maintained variable ``SECONDS``)
 echo "stack.sh completed in $SECONDS seconds."
diff --git a/stackrc b/stackrc
index d8d1008..3002c46 100644
--- a/stackrc
+++ b/stackrc
@@ -1,3 +1,5 @@
+# stackrc
+#
 # Find the other rc files
 RC_DIR=$(cd $(dirname "$BASH_SOURCE") && pwd)
 
@@ -5,21 +7,22 @@
 DEST=/opt/stack
 
 # Specify which services to launch.  These generally correspond to
-# screen tabs. If you like to add other services that are not enabled
-# by default you can append them in your ENABLED_SERVICES variable in
-# your localrc. For example for swift you can just add this in your
-# localrc to add it with the other services:
-# ENABLED_SERVICES+=,swift
+# screen tabs. To change the default list, use the ``enable_service`` and
+# ``disable_service`` functions in ``localrc``.
+# For example, to enable Swift add this to ``localrc``:
+# enable_service swift
 #
-# If you like to explicitly remove services you can add a -$service in
-# ENABLED_SERVICES, for example in your localrc to install all defaults but not
-# cinder you would just need to set this :
-# ENABLED_SERVICES+=,-cinder
+# And to disable Cinder and use Nova Volumes instead:
+# disable_service c-api c-sch c-vol cinder
+# enable_service n-vol
 ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,cinder,c-sch,c-api,c-vol,n-sch,n-novnc,n-xvnc,n-cauth,horizon,mysql,rabbit
 
 # Set the default Nova APIs to enable
 NOVA_ENABLED_APIS=ec2,osapi_compute,osapi_volume,metadata
 
+# Repositories
+# ------------
+
 # Base GIT Repo URL
 # Another option is http://review.openstack.org/p
 GIT_BASE=https://github.com
@@ -46,7 +49,6 @@
 SWIFT3_REPO=https://github.com/fujita/swift3.git
 SWIFT3_BRANCH=master
 
-
 # python swift client library
 SWIFTCLIENT_REPO=${GIT_BASE}/openstack/python-swiftclient
 SWIFTCLIENT_BRANCH=master
@@ -75,7 +77,7 @@
 NOVACLIENT_REPO=${GIT_BASE}/openstack/python-novaclient.git
 NOVACLIENT_BRANCH=master
 
-# Shared openstack python client library
+# consolidated openstack python client
 OPENSTACKCLIENT_REPO=${GIT_BASE}/openstack/python-openstackclient.git
 OPENSTACKCLIENT_BRANCH=master
 
@@ -110,7 +112,7 @@
     source $RC_DIR/localrc
 fi
 
-# Specify a comma-separated list of uec images to download and install into glance.
+# Specify a comma-separated list of UEC images to download and install into glance.
 # supported urls here are:
 #  * "uec-style" images:
 #     If the file ends in .tar.gz, uncompress the tarball and and select the first
@@ -123,13 +125,17 @@
 #    example:
 #      http://cloud-images.ubuntu.com/releases/oneiric/release/ubuntu-11.10-server-cloudimg-armel-disk1.img
 #      http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-rootfs.img.gz
+#  * OpenVZ image:
+#    OpenVZ uses its own format of image, and does not support UEC style images
+
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
 #IMAGE_URLS="http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # cirros full disk image
-#
-# Set default image based on LIBVIRT_TYPE or VIRT_DRIVER, which may be set in localrc
-# but allow DEFAULT_IMAGE_NAME and IMAGE_URLS to be set directly in localrc, too.
+
+# Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
+# which may be set in ``localrc``.  Also allow ``DEFAULT_IMAGE_NAME`` and 
+# ``IMAGE_URLS`` to be set directly in ``localrc``.
 case "$VIRT_DRIVER" in
-    openvz) # OpenVZ uses its own format of image, and does not support uec style images
+    openvz) 
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
     libvirt)
diff --git a/tools/configure_tempest.sh b/tools/configure_tempest.sh
index d502248..2df0315 100755
--- a/tools/configure_tempest.sh
+++ b/tools/configure_tempest.sh
@@ -162,11 +162,16 @@
 COMPUTE_CATALOG_TYPE=compute
 COMPUTE_CREATE_IMAGE_ENABLED=True
 COMPUTE_ALLOW_TENANT_ISOLATION=True
+COMPUTE_ALLOW_TENANT_REUSE=True
 COMPUTE_RESIZE_AVAILABLE=False
 COMPUTE_CHANGE_PASSWORD_AVAILABLE=False  # not supported with QEMU...
 COMPUTE_LOG_LEVEL=ERROR
 BUILD_INTERVAL=3
 BUILD_TIMEOUT=400
+COMPUTE_BUILD_INTERVAL=3
+COMPUTE_BUILD_TIMEOUT=400
+VOLUME_BUILD_INTERVAL=3
+VOLUME_BUILD_TIMEOUT=300
 RUN_SSH=True
 # Check for DEFAULT_INSTANCE_USER and try to connect with that account
 SSH_USER=${DEFAULT_INSTANCE_USER:-$OS_USERNAME}
@@ -212,6 +217,7 @@
     s,%ALT_TENANT_NAME%,$ALT_TENANT_NAME,g;
     s,%COMPUTE_CATALOG_TYPE%,$COMPUTE_CATALOG_TYPE,g;
     s,%COMPUTE_ALLOW_TENANT_ISOLATION%,$COMPUTE_ALLOW_TENANT_ISOLATION,g;
+    s,%COMPUTE_ALLOW_TENANT_REUSE%,$COMPUTE_ALLOW_TENANT_REUSE,g;
     s,%COMPUTE_CREATE_IMAGE_ENABLED%,$COMPUTE_CREATE_IMAGE_ENABLED,g;
     s,%COMPUTE_RESIZE_AVAILABLE%,$COMPUTE_RESIZE_AVAILABLE,g;
     s,%COMPUTE_CHANGE_PASSWORD_AVAILABLE%,$COMPUTE_CHANGE_PASSWORD_AVAILABLE,g;
@@ -219,6 +225,8 @@
     s,%COMPUTE_LOG_LEVEL%,$COMPUTE_LOG_LEVEL,g;
     s,%BUILD_INTERVAL%,$BUILD_INTERVAL,g;
     s,%BUILD_TIMEOUT%,$BUILD_TIMEOUT,g;
+    s,%COMPUTE_BUILD_INTERVAL%,$COMPUTE_BUILD_INTERVAL,g;
+    s,%COMPUTE_BUILD_TIMEOUT%,$COMPUTE_BUILD_TIMEOUT,g;
     s,%RUN_SSH%,$RUN_SSH,g;
     s,%SSH_USER%,$SSH_USER,g;
     s,%NETWORK_FOR_SSH%,$NETWORK_FOR_SSH,g;
@@ -246,6 +254,8 @@
     s,%NETWORK_CATALOG_TYPE%,$NETWORK_CATALOG_TYPE,g;
     s,%NETWORK_API_VERSION%,$NETWORK_API_VERSION,g;
     s,%VOLUME_CATALOG_TYPE%,$VOLUME_CATALOG_TYPE,g;
+    s,%VOLUME_BUILD_INTERVAL%,$VOLUME_BUILD_INTERVAL,g;
+    s,%VOLUME_BUILD_TIMEOUT%,$VOLUME_BUILD_TIMEOUT,g;
 " -i $TEMPEST_CONF
 
 echo "Created tempest configuration file:"
diff --git a/unstack.sh b/unstack.sh
index 17752a8..30ee512 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -6,7 +6,7 @@
 # mysql and rabbit are left running as OpenStack code refreshes
 # do not require them to be restarted.
 #
-# Stop all processes by setting UNSTACK_ALL or specifying ``--all``
+# Stop all processes by setting ``UNSTACK_ALL`` or specifying ``--all``
 # on the command line
 
 # Keep track of the current devstack directory.
@@ -67,6 +67,11 @@
     if is_service_enabled mysql; then
         stop_service mysql
     fi
+
+    # Stop rabbitmq-server
+    if is_service_enabled rabbit; then
+        stop_service rabbitmq-server
+    fi
 fi
 
 # Quantum dhcp agent runs dnsmasq