Merge "Update to PLUMgrid plugin configuration"
diff --git a/README.md b/README.md
index fbf7b4a..46d3f96 100644
--- a/README.md
+++ b/README.md
@@ -83,6 +83,13 @@
 
     ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid"
 
+# Apache Frontend
+
+Apache web server is enabled for wsgi services by setting `APACHE_ENABLED_SERVICES` in your localrc. Remember to enable these services first, as described above.
+
+Example:
+    APACHE_ENABLED_SERVICES+=keystone,swift
+
 # Swift
 
 Swift is disabled by default.  When enabled, it is configured with
@@ -148,6 +155,32 @@
     Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_type=vxlan vxlan_udp_port=8472)
     Q_SRV_EXTRA_OPTS=(tenant_network_type=vxlan)
 
+devstack also supports configuring the Neutron ML2 plugin. The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. A simple way to configure the ml2 plugin is shown below:
+
+    # VLAN configuration
+    Q_PLUGIN=ml2
+    ENABLE_TENANT_VLANS=True
+
+    # GRE tunnel configuration
+    Q_PLUGIN=ml2
+    ENABLE_TENANT_TUNNELS=True
+
+    # VXLAN tunnel configuration
+    Q_PLUGIN=ml2
+    Q_ML2_TENANT_NETWORK_TYPE=vxlan
+
+The above will default in devstack to using the OVS agent on each compute host. To change this, set the `Q_AGENT` variable to the agent you want to run (e.g. linuxbridge).
+
+    Variable Name                    Notes
+    -------------------------------------------------------------------------------------
+    Q_AGENT                          This specifies which agent to run with the ML2 Plugin (either `openvswitch` or `linuxbridge`).
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS   The ML2 MechanismDrivers to load. The default is none. Note, ML2 will work with the OVS and LinuxBridge agents by default.
+    Q_ML2_PLUGIN_TYPE_DRIVERS        The ML2 TypeDrivers to load. Defaults to all available TypeDrivers.
+    Q_ML2_PLUGIN_GRE_TYPE_OPTIONS    GRE TypeDriver options. Defaults to none.
+    Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS  VXLAN TypeDriver options. Defaults to none.
+    Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS   VLAN TypeDriver options. Defaults to none.
+    Q_AGENT_EXTRA_AGENT_OPTS         Extra configuration options to pass to the OVS or LinuxBridge Agent.
+
 # Tempest
 
 If tempest has been successfully configured, a basic set of smoke tests can be run as follows:
@@ -188,15 +221,5 @@
 To setup a cells environment add the following to your `localrc`:
 
     enable_service n-cell
-    enable_service n-api-meta
-    MULTI_HOST=True
 
-    # The following have not been tested with cells, they may or may not work.
-    disable_service n-obj
-    disable_service cinder
-    disable_service c-sch
-    disable_service c-api
-    disable_service c-vol
-    disable_service n-xvnc
-
-Be aware that there are some features currently missing in cells, one notable one being security groups.
+Be aware that there are some features currently missing in cells, one notable one being security groups.  The exercises have been patched to disable functionality not supported by cells.
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 3c83725..e2baecd 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -42,6 +42,8 @@
 # Test as the admin user
 . $TOP_DIR/openrc admin admin
 
+# Cells does not support aggregates.
+is_service_enabled n-cell && exit 55
 
 # Create an aggregate
 # ===================
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 1814732..a3a14eb 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -80,12 +80,18 @@
 # List security groups
 nova secgroup-list
 
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
-    nova secgroup-create $SECGROUP "$SECGROUP description"
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+if is_service_enabled n-cell; then
+    # Cells does not support security groups, so force the use of "default"
+    SECGROUP="default"
+    echo "Using the default security group because of Cells."
+else
+    # Create a secgroup
+    if ! nova secgroup-list | grep -q $SECGROUP; then
+        nova secgroup-create $SECGROUP "$SECGROUP description"
+        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+            echo "Security group not created"
+            exit 1
+        fi
     fi
 fi
 
@@ -200,8 +206,12 @@
 end_time=$(date +%s)
 echo "Completed cinder delete in $((end_time - start_time)) seconds"
 
-# Delete secgroup
-nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+if [[ $SECGROUP = "default" ]] ; then
+    echo "Skipping deleting default security group"
+else
+    # Delete secgroup
+    nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+fi
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/euca.sh b/exercises/euca.sh
index eec8636..5b0d1ba 100755
--- a/exercises/euca.sh
+++ b/exercises/euca.sh
@@ -58,11 +58,17 @@
 IMAGE=`euca-describe-images | grep machine | grep ${DEFAULT_IMAGE_NAME} | cut -f2 | head -n1`
 die_if_not_set $LINENO IMAGE "Failure getting image $DEFAULT_IMAGE_NAME"
 
-# Add a secgroup
-if ! euca-describe-groups | grep -q $SECGROUP; then
-    euca-add-group -d "$SECGROUP description" $SECGROUP
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
-        die $LINENO "Security group not created"
+if is_service_enabled n-cell; then
+    # Cells does not support security groups, so force the use of "default"
+    SECGROUP="default"
+    echo "Using the default security group because of Cells."
+else
+    # Add a secgroup
+    if ! euca-describe-groups | grep -q $SECGROUP; then
+        euca-add-group -d "$SECGROUP description" $SECGROUP
+        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! euca-describe-groups | grep -q $SECGROUP; do sleep 1; done"; then
+            die $LINENO "Security group not created"
+        fi
     fi
 fi
 
@@ -77,7 +83,7 @@
 
 # Volumes
 # -------
-if [[ "$ENABLED_SERVICES" =~ "c-vol" ]]; then
+if is_service_enabled c-vol && ! is_service_enabled n-cell; then
    VOLUME_ZONE=`euca-describe-availability-zones | head -n1 | cut -f2`
    die_if_not_set $LINENO VOLUME_ZONE "Failure to find zone for volume"
 
@@ -117,41 +123,45 @@
     echo "Volume Tests Skipped"
 fi
 
-# Allocate floating address
-FLOATING_IP=`euca-allocate-address | cut -f2`
-die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
+if is_service_enabled n-cell; then
+    echo "Floating IP Tests Skipped because of Cells."
+else
+    # Allocate floating address
+    FLOATING_IP=`euca-allocate-address | cut -f2`
+    die_if_not_set $LINENO FLOATING_IP "Failure allocating floating IP"
 
-# Associate floating address
-euca-associate-address -i $INSTANCE $FLOATING_IP || \
-    die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
+    # Associate floating address
+    euca-associate-address -i $INSTANCE $FLOATING_IP || \
+        die $LINENO "Failure associating address $FLOATING_IP to $INSTANCE"
 
-# Authorize pinging
-euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die $LINENO "Failure authorizing rule in $SECGROUP"
+    # Authorize pinging
+    euca-authorize -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+        die $LINENO "Failure authorizing rule in $SECGROUP"
 
-# Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
-ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
+    # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds
+    ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT
 
-# Revoke pinging
-euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
-    die $LINENO "Failure revoking rule in $SECGROUP"
+    # Revoke pinging
+    euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \
+        die $LINENO "Failure revoking rule in $SECGROUP"
 
-# Release floating address
-euca-disassociate-address $FLOATING_IP || \
-    die $LINENO "Failure disassociating address $FLOATING_IP"
+    # Release floating address
+    euca-disassociate-address $FLOATING_IP || \
+        die $LINENO "Failure disassociating address $FLOATING_IP"
 
-# Wait just a tick for everything above to complete so release doesn't fail
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
-    die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
-fi
+    # Wait just a tick for everything above to complete so release doesn't fail
+    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep $INSTANCE | grep -q $FLOATING_IP; do sleep 1; done"; then
+        die $LINENO "Floating ip $FLOATING_IP not disassociated within $ASSOCIATE_TIMEOUT seconds"
+    fi
 
-# Release floating address
-euca-release-address $FLOATING_IP || \
-    die $LINENO "Failure releasing address $FLOATING_IP"
+    # Release floating address
+    euca-release-address $FLOATING_IP || \
+        die $LINENO "Failure releasing address $FLOATING_IP"
 
-# Wait just a tick for everything above to complete so terminate doesn't fail
-if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
-    die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
+    # Wait just a tick for everything above to complete so terminate doesn't fail
+    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while euca-describe-addresses | grep -q $FLOATING_IP; do sleep 1; done"; then
+        die $LINENO "Floating ip $FLOATING_IP not released within $ASSOCIATE_TIMEOUT seconds"
+    fi
 fi
 
 # Terminate instance
@@ -166,8 +176,12 @@
     die $LINENO "server didn't terminate within $TERMINATE_TIMEOUT seconds"
 fi
 
-# Delete secgroup
-euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+if [[ "$SECGROUP" = "default" ]] ; then
+    echo "Skipping deleting default security group"
+else
+    # Delete secgroup
+    euca-delete-group $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+fi
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index b22ef11..ac65cf7 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -56,6 +56,8 @@
 # Instance name
 VM_NAME="ex-float"
 
+# Cells does not support floating ips API calls
+is_service_enabled n-cell && exit 55
 
 # Launching a server
 # ==================
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index f574bb3..b2b391c 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -78,12 +78,18 @@
 # List security groups
 nova secgroup-list
 
-# Create a secgroup
-if ! nova secgroup-list | grep -q $SECGROUP; then
-    nova secgroup-create $SECGROUP "$SECGROUP description"
-    if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
-        echo "Security group not created"
-        exit 1
+if is_service_enabled n-cell; then
+    # Cells does not support security groups, so force the use of "default"
+    SECGROUP="default"
+    echo "Using the default security group because of Cells."
+else
+    # Create a secgroup
+    if ! nova secgroup-list | grep -q $SECGROUP; then
+        nova secgroup-create $SECGROUP "$SECGROUP description"
+        if ! timeout $ASSOCIATE_TIMEOUT sh -c "while ! nova secgroup-list | grep -q $SECGROUP; do sleep 1; done"; then
+            echo "Security group not created"
+            exit 1
+        fi
     fi
 fi
 
@@ -201,8 +207,12 @@
     die $LINENO "Server $VM_NAME not deleted"
 fi
 
-# Delete secgroup
-nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+if [[ $SECGROUP = "default" ]] ; then
+    echo "Skipping deleting default security group"
+else
+    # Delete secgroup
+    nova secgroup-delete $SECGROUP || die $LINENO "Failure deleting security group $SECGROUP"
+fi
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/files/apts/general b/files/apts/general
index ec6dd0d..fdf8e20 100644
--- a/files/apts/general
+++ b/files/apts/general
@@ -20,4 +20,3 @@
 euca2ools # only for testing client
 tar
 python-cmd2 # dist:precise
-python-netaddr
diff --git a/files/apts/horizon b/files/apts/horizon
index e1ce85f..0865931 100644
--- a/files/apts/horizon
+++ b/files/apts/horizon
@@ -21,4 +21,3 @@
 python-migrate
 nodejs
 nodejs-legacy # dist:quantal
-python-netaddr
diff --git a/files/apts/neutron b/files/apts/neutron
index 64fc1bf..0f4b69f 100644
--- a/files/apts/neutron
+++ b/files/apts/neutron
@@ -9,7 +9,6 @@
 python-paste
 python-routes
 python-suds
-python-netaddr
 python-pastedeploy
 python-greenlet
 python-kombu
diff --git a/files/apts/nova b/files/apts/nova
index 6a7ef74..ae925c3 100644
--- a/files/apts/nova
+++ b/files/apts/nova
@@ -30,7 +30,6 @@
 python-libvirt # NOPRIME
 python-libxml2
 python-routes
-python-netaddr
 python-numpy # used by websockify for spice console
 python-pastedeploy
 python-eventlet
diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf
new file mode 100644
index 0000000..66a3751
--- /dev/null
+++ b/files/dnsmasq-for-baremetal-from-nova-network.conf
@@ -0,0 +1,3 @@
+enable-tftp
+tftp-root=/tftpboot
+dhcp-boot=pxelinux.0
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 93711ff..f28267c 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -7,7 +7,6 @@
 openssl
 psmisc
 python-cmd2 # dist:opensuse-12.3
-python-netaddr
 python-pip
 python-pylint
 python-unittest2
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index 405fb7a..73932ac 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -15,7 +15,6 @@
 python-eventlet
 python-kombu
 python-mox
-python-netaddr
 python-nose
 python-pylint
 python-sqlalchemy-migrate
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index aadb156..e9ccf59 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -10,7 +10,6 @@
 python-iso8601
 python-kombu
 python-mysql
-python-netaddr
 python-Paste
 python-PasteDeploy
 python-pyudev
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index edb1a8a..ee4917d 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -35,7 +35,6 @@
 python-lxml # needed for glance which is needed for nova --- this shouldn't be here
 python-mox
 python-mysql
-python-netaddr
 python-numpy # needed by websockify for spice console
 python-paramiko
 python-python-gflags
diff --git a/files/rpms/general b/files/rpms/general
index 5cb3e28..9fa305c 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -11,7 +11,6 @@
 libxslt-devel # dist:rhel6 [2]
 psmisc
 pylint
-python-netaddr
 python-pip
 python-prettytable # dist:rhel6 [1]
 python-unittest2
diff --git a/files/rpms/horizon b/files/rpms/horizon
index b844d98..0ca18ca 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -16,7 +16,6 @@
 python-kombu
 python-migrate
 python-mox
-python-netaddr
 python-nose
 python-paste        #dist:f16,f17,f18,f19
 python-paste-deploy #dist:f16,f17,f18,f19
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 6a8fd36..a7700f7 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -10,7 +10,6 @@
 python-greenlet
 python-iso8601
 python-kombu
-python-netaddr
 #rhel6 gets via pip
 python-paste        # dist:f16,f17,f18,f19
 python-paste-deploy # dist:f16,f17,f18,f19
diff --git a/files/rpms/nova b/files/rpms/nova
index 8d8a0b8..c99f3de 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -28,7 +28,6 @@
 python-lockfile
 python-migrate
 python-mox
-python-netaddr
 python-paramiko # dist:f16,f17,f18,f19
 # ^ on RHEL, brings in python-crypto which conflicts with version from
 # pip we need
diff --git a/functions b/functions
index eb83dfb..4b8a06e 100644
--- a/functions
+++ b/functions
@@ -745,12 +745,17 @@
 #   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
 #   **s-** services will be enabled. This will be deprecated in the future.
 #
+# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
+# We also need to make sure to treat **n-cell-region** and **n-cell-child**
+# as enabled in this case.
+#
 # Uses global ``ENABLED_SERVICES``
 # is_service_enabled service [service ...]
 function is_service_enabled() {
     services=$@
     for service in ${services}; do
         [[ ,${ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+        [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && return 0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && return 0
         [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && return 0
         [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && return 0
@@ -1121,9 +1126,9 @@
 }
 
 
-# ``pip install`` the dependencies of the package before ``setup.py develop``
-# so pip and not distutils processes the dependency chain
-# Uses globals ``TRACK_DEPENDES``, ``*_proxy`
+# ``pip install -e`` the package, which processes the dependencies
+# using pip before running ``setup.py develop``
+# Uses globals ``STACK_USER``, ``TRACK_DEPENDS``, ``*_proxy``
 # setup_develop directory
 function setup_develop() {
     if [[ $TRACK_DEPENDS = True ]]; then
@@ -1131,19 +1136,13 @@
     else
         SUDO_CMD="sudo"
     fi
-    for reqs_file in $1/requirements.txt $1/tools/pip-requires ; do
-        if [ -f $reqs_file ] ; then
-            pip_install -r $reqs_file
-        fi
-    done
-    (cd $1; \
-        python setup.py egg_info; \
-        $SUDO_CMD \
-            HTTP_PROXY=$http_proxy \
-            HTTPS_PROXY=$https_proxy \
-            NO_PROXY=$no_proxy \
-            python setup.py develop \
-    )
+    $SUDO_CMD \
+        HTTP_PROXY=$http_proxy \
+        HTTPS_PROXY=$https_proxy \
+        NO_PROXY=$no_proxy \
+        pip install -e $1
+    # ensure that further actions can do things like setup.py sdist
+    $SUDO_CMD chown -R $STACK_USER $1/*.egg-info
 }
 
 
@@ -1214,6 +1213,14 @@
         return
     fi
 
+    # vmdk format images
+    if [[ "$image_url" =~ '.vmdk' ]]; then
+        IMAGE="$FILES/${IMAGE_FNAME}"
+        IMAGE_NAME="${IMAGE_FNAME%.vmdk}"
+        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public=True --container-format bare --disk-format vmdk --property vmware-disktype="preallocated" < "${IMAGE}"
+        return
+    fi
+
     # XenServer-ovf-format images are provided as .vhd.tgz as well
     # and should not be decompressed prior to loading
     if [[ "$image_url" =~ '.vhd.tgz' ]]; then
@@ -1283,9 +1290,9 @@
 
     if [ "$CONTAINER_FORMAT" = "bare" ]; then
         if [ "$UNPACK" = "zcat" ]; then
-            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
+            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < <(zcat --force "${IMAGE}")
         else
-            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --public --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
+            glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME" --is-public True --container-format=$CONTAINER_FORMAT --disk-format $DISK_FORMAT < "${IMAGE}"
         fi
     else
         # Use glance client to add the kernel the root filesystem.
@@ -1293,12 +1300,12 @@
         # kernel for use when uploading the root filesystem.
         KERNEL_ID=""; RAMDISK_ID="";
         if [ -n "$KERNEL" ]; then
-            KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --public --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
+            KERNEL_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-kernel" --is-public True --container-format aki --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
         fi
         if [ -n "$RAMDISK" ]; then
-            RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --public --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
+            RAMDISK_ID=$(glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "$IMAGE_NAME-ramdisk" --is-public True --container-format ari --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
         fi
-        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --public --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
+        glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT image-create --name "${IMAGE_NAME%.img}" --is-public True --container-format ami --disk-format ami ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} ${RAMDISK_ID:+--property ramdisk_id=$RAMDISK_ID} < "${IMAGE}"
     fi
 }
 
@@ -1478,11 +1485,7 @@
 # Get the path to the pip command.
 # get_pip_command
 function get_pip_command() {
-    if is_fedora; then
-        which pip-python
-    else
-        which pip
-    fi
+    which pip || which pip-python
 
     if [ $? -ne 0 ]; then
         die $LINENO "Unable to find pip; cannot continue"
diff --git a/lib/apache b/lib/apache
new file mode 100644
index 0000000..a2b0534
--- /dev/null
+++ b/lib/apache
@@ -0,0 +1,118 @@
+# lib/apache
+# Functions to control configuration and operation of apache web server
+
+# Dependencies:
+# ``functions`` file
+# is_apache_enabled_service
+# change_apache_user_group
+# install_apache_wsgi
+# config_apache_wsgi
+# start_apache_server
+# stop_apache_server
+# restart_apache_server
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+# Allow overriding the default Apache user and group, default to
+# current user and his default group.
+APACHE_USER=${APACHE_USER:-$USER}
+APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
+
+
+# Set up apache name and configuration directory
+if is_ubuntu; then
+    APACHE_NAME=apache2
+    APACHE_CONF_DIR=sites-available
+elif is_fedora; then
+    APACHE_NAME=httpd
+    APACHE_CONF_DIR=conf.d
+elif is_suse; then
+    APACHE_NAME=apache2
+    APACHE_CONF_DIR=vhosts.d
+fi
+
+# Functions
+# ---------
+
+# is_apache_enabled_service() checks if the service(s) specified as arguments are
+# apache enabled by the user in ``APACHE_ENABLED_SERVICES`` as web front end.
+#
+# Multiple services specified as arguments are ``OR``'ed together; the test
+# is a short-circuit boolean, i.e it returns on the first match.
+#
+# Uses global ``APACHE_ENABLED_SERVICES``
+# APACHE_ENABLED_SERVICES service [service ...]
+function is_apache_enabled_service() {
+    services=$@
+    for service in ${services}; do
+        [[ ,${APACHE_ENABLED_SERVICES}, =~ ,${service}, ]] && return 0
+    done
+    return 1
+}
+
+# change_apache_user_group() - Change the User/Group to run Apache server
+function change_apache_user_group(){
+    local stack_user=$@
+    if is_ubuntu; then
+        sudo sed -e "
+            s/^export APACHE_RUN_USER=.*/export APACHE_RUN_USER=${stack_user}/g;
+            s/^export APACHE_RUN_GROUP=.*/export APACHE_RUN_GROUP=${stack_user}/g
+        " -i /etc/${APACHE_NAME}/envvars
+    elif is_fedora; then
+        sudo sed -e "
+            s/^User .*/User ${stack_user}/g;
+            s/^Group .*/Group ${stack_user}/g
+        " -i /etc/${APACHE_NAME}/httpd.conf
+    elif is_suse; then
+        sudo sed -e "
+            s/^User .*/User ${stack_user}/g;
+            s/^Group .*/Group ${stack_user}/g
+        " -i /etc/${APACHE_NAME}/uid.conf
+    else
+        exit_distro_not_supported "apache user and group"
+    fi
+}
+
+# install_apache_wsgi() - Install Apache server and wsgi module
+function install_apache_wsgi() {
+    # Apache installation, because we mark it NOPRIME
+    if is_ubuntu; then
+        # Install apache2, which is NOPRIME'd
+        install_package apache2 libapache2-mod-wsgi
+    elif is_fedora; then
+        sudo rm -f /etc/httpd/conf.d/000-*
+        install_package httpd mod_wsgi
+    elif is_suse; then
+        install_package apache2 apache2-mod_wsgi
+    else
+        exit_distro_not_supported "apache installation"
+    fi
+}
+
+# start_apache_server() - Start running apache server
+function start_apache_server() {
+    start_service $APACHE_NAME
+}
+
+# stop_apache_server() - Stop running apache server
+function stop_apache_server() {
+    if [ -n "$APACHE_NAME" ]; then
+        stop_service $APACHE_NAME
+    else
+        exit_distro_not_supported "apache configuration"
+    fi
+}
+
+# restart_apache_server
+function restart_apache_server() {
+    restart_service $APACHE_NAME
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/baremetal b/lib/baremetal
index bed3c09..145544d 100644
--- a/lib/baremetal
+++ b/lib/baremetal
@@ -80,6 +80,15 @@
 # change the virtualization type: --engine qemu
 BM_POSEUR_EXTRA_OPTS=${BM_POSEUR_EXTRA_OPTS:-}
 
+# To provide PXE, configure nova-network's dnsmasq rather than run the one
+# dedicated to baremetal. When enabling this, make sure these conditions are
+# fulfilled:
+#  1) nova-compute and nova-network run on the same host
+#  2) nova-network uses FlatDHCPManager
+# NOTE: the other BM_DNSMASQ_* options have no effect on the behavior if this option
+#       is enabled.
+BM_DNSMASQ_FROM_NOVA_NETWORK=`trueorfalse False $BM_DNSMASQ_FROM_NOVA_NETWORK`
+
 # BM_DNSMASQ_IFACE should match FLAT_NETWORK_BRIDGE
 if [ "$BM_USE_FAKE_ENV" ]; then
     BM_DNSMASQ_IFACE=${BM_DNSMASQ_IFACE:-br99}
@@ -239,14 +248,14 @@
          --os-image-url http://$GLANCE_HOSTPORT \
          image-create \
          --name $BM_DEPLOY_KERNEL \
-         --public --disk-format=aki \
+         --is-public True --disk-format=aki \
          < $TOP_DIR/files/$BM_DEPLOY_KERNEL  | grep ' id ' | get_field 2)
     BM_DEPLOY_RAMDISK_ID=$(glance \
          --os-auth-token $token \
          --os-image-url http://$GLANCE_HOSTPORT \
          image-create \
          --name $BM_DEPLOY_RAMDISK \
-         --public --disk-format=ari \
+         --is-public True --disk-format=ari \
          < $TOP_DIR/files/$BM_DEPLOY_RAMDISK  | grep ' id ' | get_field 2)
 }
 
@@ -294,14 +303,14 @@
          --os-image-url http://$GLANCE_HOSTPORT \
          image-create \
          --name $image_name-kernel \
-         --public --disk-format=aki \
+         --is-public True --disk-format=aki \
          < $TOP_DIR/files/$OUT_KERNEL | grep ' id ' | get_field 2)
     RAMDISK_ID=$(glance \
          --os-auth-token $token \
          --os-image-url http://$GLANCE_HOSTPORT \
          image-create \
          --name $image_name-initrd \
-         --public --disk-format=ari \
+         --is-public True --disk-format=ari \
          < $TOP_DIR/files/$OUT_RAMDISK | grep ' id ' | get_field 2)
 }
 
@@ -371,14 +380,14 @@
             --os-auth-token $token \
             --os-image-url http://$GLANCE_HOSTPORT \
             image-create \
-            --name "$IMAGE_NAME-kernel" --public \
+            --name "$IMAGE_NAME-kernel" --is-public True \
             --container-format aki \
             --disk-format aki < "$KERNEL" | grep ' id ' | get_field 2)
         RAMDISK_ID=$(glance \
             --os-auth-token $token \
             --os-image-url http://$GLANCE_HOSTPORT \
             image-create \
-            --name "$IMAGE_NAME-ramdisk" --public \
+            --name "$IMAGE_NAME-ramdisk" --is-public True \
             --container-format ari \
             --disk-format ari < "$RAMDISK" | grep ' id ' | get_field 2)
     else
@@ -390,7 +399,7 @@
        --os-auth-token $token \
        --os-image-url http://$GLANCE_HOSTPORT \
        image-create \
-       --name "${IMAGE_NAME%.img}" --public \
+       --name "${IMAGE_NAME%.img}" --is-public True \
        --container-format $CONTAINER_FORMAT \
        --disk-format $DISK_FORMAT \
        ${KERNEL_ID:+--property kernel_id=$KERNEL_ID} \
diff --git a/lib/ceilometer b/lib/ceilometer
index 548496e..8768122 100644
--- a/lib/ceilometer
+++ b/lib/ceilometer
@@ -1,8 +1,11 @@
 # lib/ceilometer
 # Install and start **Ceilometer** service
 
-# To enable Ceilometer services, add the following to localrc
-# enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+# To enable a minimal set of Ceilometer services, add the following to localrc:
+#   enable_service ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api
+#
+# To ensure Ceilometer alarming services are enabled also, further add to the localrc:
+#   enable_service ceilometer-alarm-notify ceilometer-alarm-eval
 
 # Dependencies:
 # - functions
@@ -81,7 +84,6 @@
     iniset $CEILOMETER_CONF DEFAULT os_username ceilometer
     iniset $CEILOMETER_CONF DEFAULT os_password $SERVICE_PASSWORD
     iniset $CEILOMETER_CONF DEFAULT os_tenant_name $SERVICE_TENANT_NAME
-    iniset $CEILOMETER_CONF DEFAULT os_auth_url $OS_AUTH_URL
 
     iniset $CEILOMETER_CONF keystone_authtoken auth_protocol http
     iniset $CEILOMETER_CONF keystone_authtoken admin_user ceilometer
@@ -136,12 +138,14 @@
     screen_it ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF"
     screen_it ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF"
     screen_it ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-alarm-notify "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF"
+    screen_it ceilometer-alarm-eval "ceilometer-alarm-singleton --config-file $CEILOMETER_CONF"
 }
 
 # stop_ceilometer() - Stop running processes
 function stop_ceilometer() {
     # Kill the ceilometer screen windows
-    for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api; do
+    for serv in ceilometer-acompute ceilometer-acentral ceilometer-collector ceilometer-api ceilometer-alarm-notify ceilometer-alarm-eval; do
         screen -S $SCREEN_NAME -p $serv -X kill
     done
 }
diff --git a/lib/horizon b/lib/horizon
index b537484..89bd659 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -4,6 +4,7 @@
 
 # Dependencies:
 # ``functions`` file
+# ``apache`` file
 # ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
 # <list other global vars that are assumed to be defined>
 
@@ -33,23 +34,6 @@
 # The example file in Horizon repo is used by default.
 HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example}
 
-# Allow overriding the default Apache user and group, default to
-# current user and his default group.
-APACHE_USER=${APACHE_USER:-$USER}
-APACHE_GROUP=${APACHE_GROUP:-$(id -gn $APACHE_USER)}
-
-# Set up service name and configuration path
-if is_ubuntu; then
-    APACHE_NAME=apache2
-    APACHE_CONF=sites-available/horizon
-elif is_fedora; then
-    APACHE_NAME=httpd
-    APACHE_CONF=conf.d/horizon.conf
-elif is_suse; then
-    APACHE_NAME=apache2
-    APACHE_CONF=vhosts.d/horizon.conf
-fi
-
 
 # Functions
 # ---------
@@ -104,6 +88,9 @@
     local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
 
+    if is_service_enabled neutron; then
+        _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_security_group $Q_USE_SECGROUP
+    fi
     # enable loadbalancer dashboard in case service is enabled
     if is_service_enabled q-lbaas; then
         _horizon_config_set $local_settings OPENSTACK_NEUTRON_NETWORK enable_lb True
@@ -119,11 +106,12 @@
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
     HORIZON_REQUIRE=''
+    local horizon_conf=/etc/$APACHE_NAME/$APACHE_CONF_DIR/horizon
     if is_ubuntu; then
         # Clean up the old config name
         sudo rm -f /etc/apache2/sites-enabled/000-default
         # Be a good citizen and use the distro tools here
-        sudo touch /etc/$APACHE_NAME/$APACHE_CONF
+        sudo touch $horizon_conf
         sudo a2ensite horizon
         # WSGI isn't enabled by default, enable it
         sudo a2enmod wsgi
@@ -153,23 +141,13 @@
         s,%APACHE_NAME%,$APACHE_NAME,g;
         s,%DEST%,$DEST,g;
         s,%HORIZON_REQUIRE%,$HORIZON_REQUIRE,g;
-    \" $FILES/apache-horizon.template >/etc/$APACHE_NAME/$APACHE_CONF"
+    \" $FILES/apache-horizon.template >$horizon_conf"
 }
 
 # install_horizon() - Collect source and prepare
 function install_horizon() {
     # Apache installation, because we mark it NOPRIME
-    if is_ubuntu; then
-        # Install apache2, which is NOPRIME'd
-        install_package apache2 libapache2-mod-wsgi
-    elif is_fedora; then
-        sudo rm -f /etc/httpd/conf.d/000-*
-        install_package httpd mod_wsgi
-    elif is_suse; then
-        install_package apache2 apache2-mod_wsgi
-    else
-        exit_distro_not_supported "apache installation"
-    fi
+    install_apache_wsgi
 
     # NOTE(sdague) quantal changed the name of the node binary
     if is_ubuntu; then
@@ -185,17 +163,13 @@
 
 # start_horizon() - Start running processes, including screen
 function start_horizon() {
-    restart_service $APACHE_NAME
+    restart_apache_server
     screen_it horizon "cd $HORIZON_DIR && sudo tail -f /var/log/$APACHE_NAME/horizon_error.log"
 }
 
 # stop_horizon() - Stop running processes (non-screen)
 function stop_horizon() {
-    if [ -n "$APACHE_NAME" ]; then
-        stop_service $APACHE_NAME
-    else
-        exit_distro_not_supported "apache configuration"
-    fi
+    stop_apache_server
 }
 
 
diff --git a/lib/infra b/lib/infra
new file mode 100644
index 0000000..0b73259
--- /dev/null
+++ b/lib/infra
@@ -0,0 +1,56 @@
+# lib/infra
+#
+# Functions to install infrastructure projects needed by other projects
+# early in the cycle. We need this so we can do things like gate on
+# requirements as a global list
+
+# Dependencies:
+# ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# unfubar_setuptools
+# install_infra
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+PBR_DIR=$DEST/pbr
+REQUIREMENTS_DIR=$DEST/requirements
+
+# Entry Points
+# ------------
+
+# unfubar_setuptools() - Unbreak the giant mess that is the current state of setuptools
+function unfubar_setuptools() {
+    # this is a giant game of who's on first, but it does consistently work
+    # there is hope that upstream python packaging fixes this in the future
+    echo_summary "Unbreaking setuptools"
+    pip_install -U setuptools
+    pip_install -U pip
+    uninstall_package python-setuptools
+    pip_install -U setuptools
+    pip_install -U pip
+}
+
+
+# install_infra() - Collect source and prepare
+function install_infra() {
+    # bring down global requirements
+    git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH
+
+    # Install pbr
+    git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
+    setup_develop $PBR_DIR
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet
new file mode 100644
index 0000000..4d343f5
--- /dev/null
+++ b/lib/neutron_plugins/midonet
@@ -0,0 +1,82 @@
+# Neutron MidoNet plugin
+# ----------------------
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function is_neutron_ovs_base_plugin() {
+    # MidoNet does not use l3-agent
+    # 0 means True here
+    return 1
+}
+
+function neutron_plugin_create_nova_conf() {
+    NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"midonet.nova.virt.libvirt.vif.MidonetVifDriver"}
+}
+
+function neutron_plugin_install_agent_packages() {
+    :
+}
+
+function neutron_plugin_configure_common() {
+    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/midonet
+    Q_PLUGIN_CONF_FILENAME=midonet.ini
+    Q_DB_NAME="neutron_midonet"
+    Q_PLUGIN_CLASS="neutron.plugins.midonet.plugin.MidonetPluginV2"
+}
+
+function neutron_plugin_configure_debug_command() {
+    :
+}
+
+function neutron_plugin_configure_dhcp_agent() {
+   die $LINENO "q-dhcp must not be executed with MidoNet plugin!"
+}
+
+function neutron_plugin_configure_l3_agent() {
+   die $LINENO "q-l3 must not be executed with MidoNet plugin!"
+}
+
+function neutron_plugin_configure_plugin_agent() {
+   die $LINENO "q-agt must not be executed with MidoNet plugin!"
+}
+
+function neutron_plugin_configure_service() {
+    if [[ "$MIDONET_API_URI" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET midonet_uri $MIDONET_API_URI
+    fi
+    if [[ "$MIDONET_USERNAME" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET username $MIDONET_USERNAME
+    fi
+    if [[ "$MIDONET_PASSWORD" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET password $MIDONET_PASSWORD
+    fi
+    if [[ "$MIDONET_PROJECT_ID" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET project_id $MIDONET_PROJECT_ID
+    fi
+    if [[ "$MIDONET_PROVIDER_ROUTER_ID" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $MIDONET_PROVIDER_ROUTER_ID
+    fi
+    if [[ "$MIDONET_METADATA_ROUTER_ID" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $MIDONET_METADATA_ROUTER_ID
+    fi
+}
+
+function neutron_plugin_setup_interface_driver() {
+    # May change in the future
+    :
+}
+
+function has_neutron_plugin_security_group() {
+    # 0 means True here
+    return 0
+}
+
+function neutron_plugin_check_adv_test_requirements() {
+    # 0 means True here
+    return 1
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index fcff870..ff49d8e 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -5,10 +5,42 @@
 MY_XTRACE=$(set +o | grep xtrace)
 set +o xtrace
 
+# Enable this to simply and quickly enable tunneling with ML2.
+# Select either 'gre', 'vxlan', or '(gre vxlan)'
+Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-}
+# This has to be set here since the agent will set this in the config file
+if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
+    Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=$Q_ML2_TENANT_NETWORK_TYPE)
+elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+    Q_AGENT_EXTRA_AGENT_OPTS=(tunnel_types=gre)
+fi
+
 # Default openvswitch L2 agent
 Q_AGENT=${Q_AGENT:-openvswitch}
 source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent
 
+# List of MechanismDrivers to load
+Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-}
+# List of Type Drivers to load
+Q_ML2_PLUGIN_TYPE_DRIVERS=${Q_ML2_PLUGIN_TYPE_DRIVERS:-local,flat,vlan,gre,vxlan}
+# Default GRE TypeDriver options
+Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges=$TENANT_TUNNEL_RANGES}
+# Default VXLAN TypeDriver options
+Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000}
+# Default VLAN TypeDriver options
+Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-}
+
+function populate_ml2_config() {
+    OPTS=$1
+    CONF=$2
+    SECTION=$3
+
+    for I in "${OPTS[@]}"; do
+        # Replace the first '=' with ' ' for iniset syntax
+        iniset $CONF $SECTION ${I/=/ }
+    done
+}
+
 function neutron_plugin_configure_common() {
     Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ml2
     Q_PLUGIN_CONF_FILENAME=ml2_conf.ini
@@ -17,26 +49,31 @@
 }
 
 function neutron_plugin_configure_service() {
-    if [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types gre
-        iniset /$Q_PLUGIN_CONF_FILE ml2_type_gre tunnel_id_ranges $TENANT_TUNNEL_RANGES
+    if [[ "$Q_ML2_TENANT_NETWORK_TYPE" != "" ]]; then
+        Q_SRV_EXTRA_OPTS=(tenant_network_types=$Q_ML2_TENANT_NETWORK_TYPE)
+    elif [[ "$ENABLE_TENANT_TUNNELS" = "True" ]]; then
+        # This assumes you want a simple configuration, and will overwrite
+        # Q_SRV_EXTRA_OPTS if set in addition to ENABLE_TENANT_TUNNELS.
+        Q_SRV_EXTRA_OPTS=(tenant_network_types=gre)
+        Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=(tunnel_id_ranges=$TENANT_TUNNEL_RANGES)
     elif [[ "$ENABLE_TENANT_VLANS" = "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE ml2 tenant_network_types vlan
+        Q_SRV_EXTRA_OPTS=(tenant_network_types=vlan)
     else
         echo "WARNING - The ml2 plugin is using local tenant networks, with no connectivity between hosts."
     fi
 
-    # Override ``ML2_VLAN_RANGES`` and any needed agent configuration
-    # variables in ``localrc`` for more complex physical network
-    # configurations.
-    if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
-        ML2_VLAN_RANGES=$PHYSICAL_NETWORK
-        if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
-            ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE
+    # Allow for overriding VLAN configuration (for example, to configure provider
+    # VLANs) by first checking if Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS is set.
+    if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" == "" ]; then
+        if [[ "$ML2_VLAN_RANGES" = "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]]; then
+            ML2_VLAN_RANGES=$PHYSICAL_NETWORK
+            if [[ "$TENANT_VLAN_RANGE" != "" ]]; then
+                ML2_VLAN_RANGES=$ML2_VLAN_RANGES:$TENANT_VLAN_RANGE
+            fi
         fi
-    fi
-    if [[ "$ML2_VLAN_RANGES" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE ml2_type_vlan network_vlan_ranges $ML2_VLAN_RANGES
+        if [[ "$ML2_VLAN_RANGES" != "" ]]; then
+            Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=$ML2_VLAN_RANGES)
+        fi
     fi
 
     # REVISIT(rkukura): Setting firewall_driver here for
@@ -52,6 +89,20 @@
         iniset /$Q_PLUGIN_CONF_FILE SECURITYGROUP firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
 
+    # Since we enable the tunnel TypeDrivers, also enable a local_ip
+    iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $HOST_IP
+
+    populate_ml2_config type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS /$Q_PLUGIN_CONF_FILE ml2
+
+    populate_ml2_config $Q_SRV_EXTRA_OPTS /$Q_PLUGIN_CONF_FILE ml2
+
+    populate_ml2_config $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_gre
+
+    populate_ml2_config $Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vxlan
+
+    if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
+        populate_ml2_config $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS /$Q_PLUGIN_CONF_FILE ml2_type_vlan
+    fi
 }
 
 function has_neutron_plugin_security_group() {
diff --git a/lib/neutron_thirdparty/midonet b/lib/neutron_thirdparty/midonet
new file mode 100644
index 0000000..b3c726f
--- /dev/null
+++ b/lib/neutron_thirdparty/midonet
@@ -0,0 +1,64 @@
+# MidoNet
+# -------
+
+# This file implements functions required to configure MidoNet as the third-party
+# system used with devstack's Neutron.  To include this file, add the following
+# to localrc:
+#
+# * enable_service midonet
+#
+
+# MidoNet devstack destination dir
+MIDONET_DIR=${MIDONET_DIR:-$DEST/midonet}
+
+# MidoNet client repo
+MIDONET_CLIENT_REPO=${MIDONET_CLIENT_REPO:-https://github.com/midokura/python-midonetclient.git}
+MIDONET_CLIENT_BRANCH=${MIDONET_CLIENT_BRANCH:-master}
+MIDONET_CLIENT_DIR=$MIDONET_DIR/python-midonetclient
+
+# MidoNet OpenStack repo
+MIDONET_OS_REPO=${MIDONET_OS_REPO:-https://github.com/midokura/midonet-openstack.git}
+MIDONET_OS_BRANCH=${MIDONET_OS_BRANCH:-master}
+MIDONET_OS_DIR=$MIDONET_DIR/midonet-openstack
+MIDONET_SETUP_SCRIPT=${MIDONET_SETUP_SCRIPT:-$MIDONET_OS_DIR/bin/setup_midonet_topology.py}
+
+
+MIDOLMAN_LOG=${MIDOLMAN_LOG:-/var/log/midolman/midolman.log}
+MIDONET_API_LOG=${MIDONET_API_LOG:-/var/log/tomcat7/midonet-api.log}
+
+# Save trace setting
+MY_XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+function configure_midonet() {
+    :
+}
+
+function init_midonet() {
+
+    # Initialize DB.  Evaluate the output of setup_midonet_topology.py to set
+    # env variables for provider router ID and metadata router ID
+    eval `python $MIDONET_SETUP_SCRIPT admin $ADMIN_PASSWORD $ADMIN_TENANT provider_devices`
+    die_if_not_set $LINENO provider_router_id "Error running midonet setup script, provider_router_id was not set."
+    die_if_not_set $LINENO metadata_router_id "Error running midonet setup script, metadata_router_id was not set."
+
+    iniset /$Q_PLUGIN_CONF_FILE MIDONET provider_router_id $provider_router_id
+    iniset /$Q_PLUGIN_CONF_FILE MIDONET metadata_router_id $metadata_router_id
+}
+
+function install_midonet() {
+    git_clone $MIDONET_CLIENT_REPO $MIDONET_CLIENT_DIR $MIDONET_CLIENT_BRANCH
+    git_clone $MIDONET_OS_REPO $MIDONET_OS_DIR $MIDONET_OS_BRANCH
+    export PYTHONPATH=$MIDONET_CLIENT_DIR/src:$MIDONET_OS_DIR/src:$PYTHONPATH
+}
+
+function start_midonet() {
+    :
+}
+
+function stop_midonet() {
+    :
+}
+
+# Restore xtrace
+$MY_XTRACE
diff --git a/lib/nova b/lib/nova
index 617fb08..7a5ff1f 100644
--- a/lib/nova
+++ b/lib/nova
@@ -568,11 +568,11 @@
         iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host child_cell
         iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
         iniset $NOVA_CELLS_CONF cells enable True
+        iniset $NOVA_CELLS_CONF cells cell_type compute
         iniset $NOVA_CELLS_CONF cells name child
 
-        iniset $NOVA_CONF DEFAULT scheduler_topic cells
-        iniset $NOVA_CONF DEFAULT compute_api_class nova.compute.cells_api.ComputeCellsAPI
         iniset $NOVA_CONF cells enable True
+        iniset $NOVA_CONF cells cell_type api
         iniset $NOVA_CONF cells name region
 
         if is_service_enabled n-api-meta; then
@@ -714,8 +714,8 @@
     if is_service_enabled n-cell; then
         NOVA_CONF_BOTTOM=$NOVA_CELLS_CONF
         screen_it n-cond "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-conductor --config-file $NOVA_CELLS_CONF"
-        screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
-        screen_it n-cell "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
+        screen_it n-cell-region "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CONF"
+        screen_it n-cell-child "cd $NOVA_DIR && $NOVA_BIN_DIR/nova-cells --config-file $NOVA_CELLS_CONF"
     fi
 
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
diff --git a/lib/oslo b/lib/oslo
new file mode 100644
index 0000000..1eb13db
--- /dev/null
+++ b/lib/oslo
@@ -0,0 +1,42 @@
+# lib/oslo
+#
+# Functions to install oslo libraries from git
+#
+# We need this to handle the fact that projects would like to use
+# pre-released versions of oslo libraries.
+
+# Dependencies:
+# ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# install_oslo
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+OSLOCFG_DIR=$DEST/oslo.config
+OSLOMSG_DIR=$DEST/oslo.messaging
+
+# Entry Points
+# ------------
+
+# install_oslo() - Collect source and prepare
+function install_oslo() {
+    git_clone $OSLOCFG_REPO $OSLOCFG_DIR $OSLOCFG_BRANCH
+    setup_develop $OSLOCFG_DIR
+
+    git_clone $OSLOMSG_REPO $OSLOMSG_DIR $OSLOMSG_BRANCH
+    setup_develop $OSLOMSG_DIR
+}
+
+# Restore xtrace
+$XTRACE
+
+# Local variables:
+# mode: shell-script
+# End:
diff --git a/lib/swift b/lib/swift
index e53d674..c93b8b3 100644
--- a/lib/swift
+++ b/lib/swift
@@ -3,6 +3,7 @@
 
 # Dependencies:
 # ``functions`` file
+# ``apache`` file
 # ``DEST``, ``SCREEN_NAME``, `SWIFT_HASH` must be defined
 # ``STACK_USER`` must be defined
 # ``SWIFT_DATA_DIR`` or ``DATA_DIR`` must be defined
@@ -10,11 +11,13 @@
 # ``stack.sh`` calls the entry points in this order:
 #
 # install_swift
+# _config_swift_apache_wsgi
 # configure_swift
 # init_swift
 # start_swift
 # stop_swift
 # cleanup_swift
+# _cleanup_swift_apache_wsgi
 
 # Save trace setting
 XTRACE=$(set +o | grep xtrace)
@@ -28,6 +31,7 @@
 SWIFT_DIR=$DEST/swift
 SWIFTCLIENT_DIR=$DEST/python-swiftclient
 SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
+SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
 SWIFT3_DIR=$DEST/swift3
 
 # TODO: add logging to different location.
@@ -97,6 +101,103 @@
       rm ${SWIFT_DATA_DIR}/drives/images/swift.img
    fi
    rm -rf ${SWIFT_DATA_DIR}/run/
+   if is_apache_enabled_service swift; then
+       _cleanup_swift_apache_wsgi
+   fi
+}
+
+# _cleanup_swift_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_swift_apache_wsgi() {
+    sudo rm -f $SWIFT_APACHE_WSGI_DIR/*.wsgi
+    ! is_fedora && sudo a2dissite proxy-server
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        for type in object container account; do
+            site_name=${type}-server-${node_number}
+            ! is_fedora && sudo a2dissite ${site_name}
+            sudo rm -f /etc/$APACHE_NAME/$APACHE_CONF_DIR/${site_name}
+        done
+    done
+}
+
+# _config_swift_apache_wsgi() - Set WSGI config files of Swift
+function _config_swift_apache_wsgi() {
+    sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR}
+    local apache_vhost_dir=/etc/${APACHE_NAME}/$APACHE_CONF_DIR
+    local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+
+    # copy proxy vhost and wsgi file
+    sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template ${apache_vhost_dir}/proxy-server
+    sudo sed -e "
+        /^#/d;/^$/d;
+        s/%PORT%/$proxy_port/g;
+        s/%SERVICENAME%/proxy-server/g;
+        s/%APACHE_NAME%/${APACHE_NAME}/g;
+    " -i ${apache_vhost_dir}/proxy-server
+
+    sudo cp ${SWIFT_DIR}/examples/wsgi/proxy-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi
+    sudo sed -e "
+        /^#/d;/^$/d;
+        s/%SERVICECONF%/proxy-server.conf/g;
+    " -i ${SWIFT_APACHE_WSGI_DIR}/proxy-server.wsgi
+    ! is_fedora && sudo a2ensite proxy-server
+
+    # copy apache vhost file and set name and port
+    for node_number in ${SWIFT_REPLICAS_SEQ}; do
+        object_port=$[OBJECT_PORT_BASE + 10 * ($node_number - 1)]
+        container_port=$[CONTAINER_PORT_BASE + 10 * ($node_number - 1)]
+        account_port=$[ACCOUNT_PORT_BASE + 10 * ($node_number - 1)]
+
+        sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template ${apache_vhost_dir}/object-server-${node_number}
+        sudo sed -e "
+            s/%PORT%/$object_port/g;
+            s/%SERVICENAME%/object-server-${node_number}/g;
+            s/%APACHE_NAME%/${APACHE_NAME}/g;
+        " -i ${apache_vhost_dir}/object-server-${node_number}
+        ! is_fedora && sudo a2ensite object-server-${node_number}
+
+        sudo cp ${SWIFT_DIR}/examples/wsgi/object-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi
+        sudo sed -e "
+            /^#/d;/^$/d;
+            s/%SERVICECONF%/object-server\/${node_number}.conf/g;
+        " -i ${SWIFT_APACHE_WSGI_DIR}/object-server-${node_number}.wsgi
+
+        sudo cp ${SWIFT_DIR}/examples/apache2/container-server.template ${apache_vhost_dir}/container-server-${node_number}
+        sudo sed -e "
+            /^#/d;/^$/d;
+            s/%PORT%/$container_port/g;
+            s/%SERVICENAME%/container-server-${node_number}/g;
+            s/%APACHE_NAME%/${APACHE_NAME}/g;
+        " -i ${apache_vhost_dir}/container-server-${node_number}
+        ! is_fedora && sudo a2ensite container-server-${node_number}
+
+        sudo cp ${SWIFT_DIR}/examples/wsgi/container-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi
+        sudo sed -e "
+            /^#/d;/^$/d;
+            s/%SERVICECONF%/container-server\/${node_number}.conf/g;
+        " -i ${SWIFT_APACHE_WSGI_DIR}/container-server-${node_number}.wsgi
+
+        sudo cp ${SWIFT_DIR}/examples/apache2/account-server.template ${apache_vhost_dir}/account-server-${node_number}
+        sudo sed -e "
+             /^#/d;/^$/d;
+            s/%PORT%/$account_port/g;
+            s/%SERVICENAME%/account-server-${node_number}/g;
+            s/%APACHE_NAME%/${APACHE_NAME}/g;
+        " -i ${apache_vhost_dir}/account-server-${node_number}
+        ! is_fedora && sudo a2ensite account-server-${node_number}
+
+        sudo cp ${SWIFT_DIR}/examples/wsgi/account-server.wsgi.template ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
+        sudo sed -e "
+             /^#/d;/^$/d;
+            s/%SERVICECONF%/account-server\/${node_number}.conf/g;
+        " -i ${SWIFT_APACHE_WSGI_DIR}/account-server-${node_number}.wsgi
+
+    done
+
+    # run apache server as stack user
+    change_apache_user_group ${STACK_USER}
+
+    # WSGI isn't enabled by default, enable it
+    ! is_fedora && sudo a2enmod wsgi
 }
 
 # configure_swift() - Set config files, create data dirs and loop image
@@ -288,6 +389,9 @@
     sudo chown -R $USER:adm ${swift_log_dir}
     sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \
         tee /etc/rsyslog.d/10-swift.conf
+    if is_apache_enabled_service swift; then
+        _config_swift_apache_wsgi
+    fi
 }
 
 # create_swift_disk - Create Swift backing disk
@@ -423,6 +527,9 @@
 function install_swift() {
     git_clone $SWIFT_REPO $SWIFT_DIR $SWIFT_BRANCH
     setup_develop $SWIFT_DIR
+    if is_apache_enabled_service swift; then
+        install_apache_wsgi
+    fi
 }
 
 function install_swiftclient() {
@@ -444,6 +551,22 @@
         sudo systemctl start xinetd.service
     fi
 
+    if is_apache_enabled_service swift; then
+        # Make sure the apache lock dir is owned by $STACK_USER so
+        # that restarting the apache server, which runs as that user,
+        # does not fail with a permission error.
+        sudo chown -R $STACK_USER /var/run/lock/$APACHE_NAME
+        restart_apache_server
+        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest start
+        screen_it s-proxy "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/proxy-server"
+        if [[ ${SWIFT_REPLICAS} == 1 ]]; then
+            for type in object container account; do
+                screen_it s-${type} "cd $SWIFT_DIR && sudo tail -f /var/log/$APACHE_NAME/${type}-server-1"
+            done
+        fi
+        return 0
+    fi
+
    # By default with only one replica we are launching the proxy,
    # container, account and object server in screen in foreground and
    # other services in background. If we have SWIFT_REPLICAS set to something
@@ -460,7 +583,7 @@
    done
    screen_it s-proxy "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
    if [[ ${SWIFT_REPLICAS} == 1 ]]; then
-       for type in object container account;do
+       for type in object container account; do
            screen_it s-${type} "cd $SWIFT_DIR && $SWIFT_DIR/bin/swift-${type}-server ${SWIFT_CONF_DIR}/${type}-server/1.conf -v"
        done
    fi
@@ -468,6 +591,11 @@
 
 # stop_swift() - Stop running processes (non-screen)
 function stop_swift() {
+
+    if is_apache_enabled_service swift; then
+        swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0
+    fi
+
     # screen normally killed by unstack.sh
     if type -p swift-init >/dev/null; then
         swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true
diff --git a/lib/tempest b/lib/tempest
index 6c68337..aaa7281 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -199,6 +199,8 @@
     # Oslo
     iniset $TEMPEST_CONF DEFAULT lock_path $TEMPEST_STATE_PATH
     mkdir -p $TEMPEST_STATE_PATH
+    iniset $TEMPEST_CONF DEFAULT use_stderr False
+    iniset $TEMPEST_CONF DEFAULT log_file tempest.log
 
     # Timeouts
     iniset $TEMPEST_CONF compute build_timeout $BUILD_TIMEOUT
diff --git a/stack.sh b/stack.sh
index 4e23505..5ba60d2 100755
--- a/stack.sh
+++ b/stack.sh
@@ -298,7 +298,10 @@
 # ==================
 
 # Source project function libraries
+source $TOP_DIR/lib/apache
 source $TOP_DIR/lib/tls
+source $TOP_DIR/lib/infra
+source $TOP_DIR/lib/oslo
 source $TOP_DIR/lib/horizon
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
@@ -313,8 +316,6 @@
 
 # Set the destination directories for other OpenStack projects
 OPENSTACKCLIENT_DIR=$DEST/python-openstackclient
-PBR_DIR=$DEST/pbr
-
 
 # Interactive Configuration
 # -------------------------
@@ -587,6 +588,8 @@
     install_neutron_agent_packages
 fi
 
+# Unbreak the giant mess that is the current state of setuptools
+unfubar_setuptools
 
 # System-specific preconfigure
 # ============================
@@ -657,9 +660,11 @@
 
 echo_summary "Installing OpenStack project source"
 
-# Install pbr
-git_clone $PBR_REPO $PBR_DIR $PBR_BRANCH
-setup_develop $PBR_DIR
+# Install required infra support libraries
+install_infra
+
+# Install oslo libraries that have graduated
+install_oslo
 
 # Install clients libraries
 install_keystoneclient
@@ -1049,6 +1054,11 @@
         iniset $NOVA_CONF baremetal driver $BM_DRIVER
         iniset $NOVA_CONF baremetal power_manager $BM_POWER_MANAGER
         iniset $NOVA_CONF baremetal tftp_root /tftpboot
+        if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "True" ]]; then
+            BM_DNSMASQ_CONF=$NOVA_CONF_DIR/dnsmasq-for-baremetal-from-nova-network.conf
+            sudo cp "$FILES/dnsmasq-for-baremetal-from-nova-network.conf" "$BM_DNSMASQ_CONF"
+            iniset $NOVA_CONF DEFAULT dnsmasq_config_file "$BM_DNSMASQ_CONF"
+        fi
 
         # Define extra baremetal nova conf flags by defining the array ``EXTRA_BAREMETAL_OPTS``.
         for I in "${EXTRA_BAREMETAL_OPTS[@]}"; do
@@ -1294,15 +1304,16 @@
        create_baremetal_flavor $BM_DEPLOY_KERNEL_ID $BM_DEPLOY_RAMDISK_ID
 
     # otherwise user can manually add it later by calling nova-baremetal-manage
-    # otherwise user can manually add it later by calling nova-baremetal-manage
     [[ -n "$BM_FIRST_MAC" ]] && add_baremetal_node
 
-    # NOTE: we do this here to ensure that our copy of dnsmasq is running
-    sudo pkill dnsmasq || true
-    sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
-        --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
-        --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
-        ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
+    if [[ "$BM_DNSMASQ_FROM_NOVA_NETWORK" = "False" ]]; then
+        # NOTE: we do this here to ensure that our copy of dnsmasq is running
+        sudo pkill dnsmasq || true
+        sudo dnsmasq --conf-file= --port=0 --enable-tftp --tftp-root=/tftpboot \
+            --dhcp-boot=pxelinux.0 --bind-interfaces --pid-file=/var/run/dnsmasq.pid \
+            --interface=$BM_DNSMASQ_IFACE --dhcp-range=$BM_DNSMASQ_RANGE \
+            ${BM_DNSMASQ_DNS:+--dhcp-option=option:dns-server,$BM_DNSMASQ_DNS}
+    fi
     # ensure callback daemon is running
     sudo pkill nova-baremetal-deploy-helper || true
     screen_it baremetal "nova-baremetal-deploy-helper"
diff --git a/stackrc b/stackrc
index 50774e4..74a399c 100644
--- a/stackrc
+++ b/stackrc
@@ -116,6 +116,14 @@
 OPENSTACKCLIENT_REPO=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git}
 OPENSTACKCLIENT_BRANCH=${OPENSTACKCLIENT_BRANCH:-master}
 
+# oslo.config
+OSLOCFG_REPO=${OSLOCFG_REPO:-${GIT_BASE}/openstack/oslo.config.git}
+OSLOCFG_BRANCH=${OSLOCFG_BRANCH:-master}
+
+# oslo.messaging
+OSLOMSG_REPO=${OSLOMSG_REPO:-${GIT_BASE}/openstack/oslo.messaging.git}
+OSLOMSG_BRANCH=${OSLOMSG_BRANCH:-master}
+
 # pbr drives the setuptools configs
 PBR_REPO=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
 PBR_BRANCH=${PBR_BRANCH:-master}
@@ -128,6 +136,10 @@
 NEUTRONCLIENT_REPO=${NEUTRONCLIENT_REPO:-${GIT_BASE}/openstack/python-neutronclient.git}
 NEUTRONCLIENT_BRANCH=${NEUTRONCLIENT_BRANCH:-master}
 
+# consolidated openstack requirements
+REQUIREMENTS_REPO=${REQUIREMENTS_REPO:-${GIT_BASE}/openstack/requirements.git}
+REQUIREMENTS_BRANCH=${REQUIREMENTS_BRANCH:-master}
+
 # storage service
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-master}
@@ -215,8 +227,8 @@
 # ``IMAGE_URLS`` to be set directly in ``localrc``.
 case "$VIRT_DRIVER" in
     openvz)
-        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-11.10-x86_64}
-        IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-11.10-x86_64.tar.gz"};;
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64}
+        IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};;
     libvirt)
         case "$LIBVIRT_TYPE" in
             lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
@@ -228,7 +240,8 @@
         esac
         ;;
     vsphere)
-        IMAGE_URLS="";;
+        DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-debian-2.6.32-i686}
+        IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/debian-2.6.32-i686.vmdk"};;
     *) # Default to Cirros with kernel, ramdisk and disk image
         DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.1-x86_64-uec}
         IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-uec.tar.gz"};;
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 2cba33c..e762f6d 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -151,9 +151,7 @@
     # Destroy any instances that were launched
     for uuid in `xe vm-list | grep -1 instance | grep uuid | sed "s/.*\: //g"`; do
         echo "Shutting down nova instance $uuid"
-        xe vm-unpause uuid=$uuid || true
-        xe vm-shutdown uuid=$uuid || true
-        xe vm-destroy uuid=$uuid
+        xe vm-uninstall uuid=$uuid force=true
     done
 
     # Destroy orphaned vdis
diff --git a/tools/xen/scripts/manage-vdi b/tools/xen/scripts/manage-vdi
index 381e671..909ce32 100755
--- a/tools/xen/scripts/manage-vdi
+++ b/tools/xen/scripts/manage-vdi
@@ -41,7 +41,17 @@
        echo "Failed to find mapping"
        exit -1
     fi
-    echo "/dev/mapper/${mapping}"
+
+    local device="/dev/mapper/${mapping}"
+    for (( i = 0; i < 5; i++ )) ; do
+        if [ -b $device ] ; then
+            echo $device
+            return
+        fi
+        sleep 1
+    done
+    echo "ERROR: timed out waiting for dev-mapper"
+    exit 1
   else
     echo "/dev/$dev$part"
   fi
diff --git a/unstack.sh b/unstack.sh
index 1e80bf3..2268b90 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -24,6 +24,9 @@
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 
+# Import apache functions
+source $TOP_DIR/lib/apache
+
 # Get project function libraries
 source $TOP_DIR/lib/baremetal
 source $TOP_DIR/lib/cinder