Merge "Neutron-legacy: Remove LINUXNET_VIF_DRIVER option"
diff --git a/Makefile b/Makefile
index a6bb230..a94d60a 100644
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,6 @@
 
 # Duplicated from stackrc for now
 DEST=/opt/stack
-WHEELHOUSE=$(DEST)/.wheelhouse
 
 all:
 	echo "This just saved you from a terrible mistake!"
@@ -25,9 +24,6 @@
 unstack:
 	./unstack.sh
 
-wheels:
-	WHEELHOUSE=$(WHEELHOUSE) tools/build_wheels.sh
-
 docs:
 	tox -edocs
 
@@ -57,7 +53,7 @@
 
 # Clean out the cache too
 realclean: clean
-	rm -rf files/cirros*.tar.gz files/Fedora*.qcow2 $(WHEELHOUSE)
+	rm -rf files/cirros*.tar.gz files/Fedora*.qcow2
 
 # Repo stuffs
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index aae4f33..d70d3da 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -297,6 +297,12 @@
 
     SWIFT_USE_MOD_WSGI="True"
 
+Example (Heat):
+
+::
+
+    HEAT_USE_MOD_WSGI="True"
+
 
 Example (Cinder):
 
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 3562bfa..7aca8d0 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -54,7 +54,7 @@
 releases other than those documented in ``README.md`` on a best-effort
 basis.
 
-Are there any differences between Ubuntu and Centos/Fedora support?
+Are there any differences between Ubuntu and CentOS/Fedora support?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Both should work well and are tested by DevStack CI.
@@ -146,7 +146,7 @@
 
 
 Upstream DevStack is only tested with master and stable
-branches. Setting custom BRANCH definitions is not guarunteed to
+branches. Setting custom BRANCH definitions is not guaranteed to
 produce working results.
 
 What can I do about RabbitMQ not wanting to start on my fresh new VM?
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 5891f68..ee29087 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -35,7 +35,7 @@
                 network hardware_network {
                         address = "172.18.161.0/24"
                         router [ address = "172.18.161.1" ];
-                        devstack_laptop [ address = "172.18.161.6" ];
+                        devstack-1 [ address = "172.18.161.6" ];
                 }
         }
 
@@ -43,9 +43,13 @@
 DevStack Configuration
 ----------------------
 
+The following is a complete `local.conf` for the host named
+`devstack-1`. It will run all the API and other services, as well as
+serve as a hypervisor for guest instances.
 
 ::
 
+        [[local|localrc]]
         HOST_IP=172.18.161.6
         SERVICE_HOST=172.18.161.6
         MYSQL_HOST=172.18.161.6
@@ -57,6 +61,12 @@
         SERVICE_PASSWORD=secrete
         SERVICE_TOKEN=secrete
 
+        # Do not use Nova-Network
+        disable_service n-net
+        # Enable Neutron
+        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
+
+
         ## Neutron options
         Q_USE_SECGROUP=True
         FLOATING_RANGE="172.18.161.0/24"
@@ -71,6 +81,166 @@
         OVS_BRIDGE_MAPPINGS=public:br-ex
 
 
+Adding Additional Compute Nodes
+-------------------------------
+
+Let's suppose that after installing DevStack on the first host, you
+also want to do multinode testing and networking.
+
+Physical Network Setup
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. nwdiag::
+
+        nwdiag {
+                inet [ shape = cloud ];
+                router;
+                inet -- router;
+
+                network hardware_network {
+                        address = "172.18.161.0/24"
+                        router [ address = "172.18.161.1" ];
+                        devstack-1 [ address = "172.18.161.6" ];
+                        devstack-2 [ address = "172.18.161.7" ];
+                }
+        }
+
+
+After DevStack installs and configures Neutron, traffic from guest VMs
+flows out of `devstack-2` (the compute node) and is encapsulated in a
+VXLAN tunnel back to `devstack-1` (the control node) where the L3
+agent is running.
+
+::
+
+    stack@devstack-2:~/devstack$ sudo ovs-vsctl show
+    8992d965-0ba0-42fd-90e9-20ecc528bc29
+        Bridge br-int
+            fail_mode: secure
+            Port br-int
+                Interface br-int
+                    type: internal
+            Port patch-tun
+                Interface patch-tun
+                    type: patch
+                    options: {peer=patch-int}
+        Bridge br-tun
+            fail_mode: secure
+            Port "vxlan-c0a801f6"
+                Interface "vxlan-c0a801f6"
+                    type: vxlan
+                    options: {df_default="true", in_key=flow, local_ip="172.18.161.7", out_key=flow, remote_ip="172.18.161.6"}
+            Port patch-int
+                Interface patch-int
+                    type: patch
+                    options: {peer=patch-tun}
+            Port br-tun
+                Interface br-tun
+                    type: internal
+        ovs_version: "2.0.2"
+
+Open vSwitch on the control node, where the L3 agent runs, is
+configured to de-encapsulate traffic from compute nodes, then forward
+it over the `br-ex` bridge, where `eth0` is attached.
+
+::
+
+    stack@devstack-1:~/devstack$ sudo ovs-vsctl show
+    422adeea-48d1-4a1f-98b1-8e7239077964
+        Bridge br-tun
+            fail_mode: secure
+            Port br-tun
+                Interface br-tun
+                    type: internal
+            Port patch-int
+                Interface patch-int
+                    type: patch
+                    options: {peer=patch-tun}
+            Port "vxlan-c0a801d8"
+                Interface "vxlan-c0a801d8"
+                    type: vxlan
+                    options: {df_default="true", in_key=flow, local_ip="172.18.161.6", out_key=flow, remote_ip="172.18.161.7"}
+        Bridge br-ex
+            Port phy-br-ex
+                Interface phy-br-ex
+                    type: patch
+                    options: {peer=int-br-ex}
+            Port "eth0"
+                Interface "eth0"
+            Port br-ex
+                Interface br-ex
+                    type: internal
+        Bridge br-int
+            fail_mode: secure
+            Port "tapce66332d-ea"
+                tag: 1
+                Interface "tapce66332d-ea"
+                    type: internal
+            Port "qg-65e5a4b9-15"
+                tag: 2
+                Interface "qg-65e5a4b9-15"
+                    type: internal
+            Port "qr-33e5e471-88"
+                tag: 1
+                Interface "qr-33e5e471-88"
+                    type: internal
+            Port "qr-acbe9951-70"
+                tag: 1
+                Interface "qr-acbe9951-70"
+                    type: internal
+            Port br-int
+                Interface br-int
+                    type: internal
+            Port patch-tun
+                Interface patch-tun
+                    type: patch
+                    options: {peer=patch-int}
+            Port int-br-ex
+                Interface int-br-ex
+                    type: patch
+                    options: {peer=phy-br-ex}
+        ovs_version: "2.0.2"
+
+`br-int` is the "integration bridge" created by the Open vSwitch
+mechanism driver; it is where ports are created and plugged into the
+virtual switching fabric. `br-ex` is an OVS bridge used to connect
+physical ports (like `eth0`), so that floating IP traffic for tenants
+can be received from the physical network infrastructure (and the
+internet) and routed to tenant network ports. `br-tun` is a tunnel
+bridge that connects OpenStack nodes (like `devstack-2`) together, so
+that tenant network traffic, encapsulated with the VXLAN tunneling
+protocol, can flow between the compute nodes where tenant instances
+run.
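+
+As a quick sanity check, you can list the bridges on either node. The
+following is only a minimal illustration; the exact set of bridges
+depends on your configuration:
+
+::
+
+    stack@devstack-1:~/devstack$ sudo ovs-vsctl list-br
+    br-ex
+    br-int
+    br-tun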
+
+
+
+DevStack Compute Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The host `devstack-2` has a very minimal `local.conf`.
+
+::
+
+    [[local|localrc]]
+    HOST_IP=172.18.161.7
+    SERVICE_HOST=172.18.161.6
+    MYSQL_HOST=172.18.161.6
+    RABBIT_HOST=172.18.161.6
+    GLANCE_HOSTPORT=172.18.161.6:9292
+    ADMIN_PASSWORD=secrete
+    MYSQL_PASSWORD=secrete
+    RABBIT_PASSWORD=secrete
+    SERVICE_PASSWORD=secrete
+    SERVICE_TOKEN=secrete
+
+    ## Neutron options
+    PUBLIC_INTERFACE=eth0
+    ENABLED_SERVICES=n-cpu,rabbit,q-agt
+
+Network traffic from `eth0` on the compute nodes is then NAT'd by the
+controller node that runs Neutron's `neutron-l3-agent` and provides L3
+connectivity.
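+
+To see those NAT rules, look inside the `qrouter` network namespace
+that `neutron-l3-agent` creates on the controller. The UUIDs below
+are placeholders for whatever `ip netns list` reports on your system:
+
+::
+
+    stack@devstack-1:~/devstack$ sudo ip netns list
+    qrouter-<router-uuid>
+    qdhcp-<network-uuid>
+    stack@devstack-1:~/devstack$ sudo ip netns exec qrouter-<router-uuid> iptables -t nat -S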
+
 
 Neutron Networking with Open vSwitch and Provider Networks
 ==========================================================
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 99e96b1..b65730f 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -206,7 +206,6 @@
 
 * `tools/build\_docs.sh <tools/build_docs.sh.html>`__
 * `tools/build\_venv.sh <tools/build_venv.sh.html>`__
-* `tools/build\_wheels.sh <tools/build_wheels.sh.html>`__
 * `tools/create-stack-user.sh <tools/create-stack-user.sh.html>`__
 * `tools/create\_userrc.sh <tools/create_userrc.sh.html>`__
 * `tools/fixup\_stuff.sh <tools/fixup_stuff.sh.html>`__
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index c33ef44..7cfef1c 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -41,7 +41,6 @@
 unset NOVA_REGION_NAME
 unset NOVA_URL
 unset NOVA_USERNAME
-unset NOVA_VERSION
 
 # Save the known variables for later
 export x_TENANT_NAME=$OS_TENANT_NAME
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index 4a0609a..1d2f4f5 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -41,7 +41,6 @@
 unset NOVA_REGION_NAME
 unset NOVA_URL
 unset NOVA_USERNAME
-unset NOVA_VERSION
 
 for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
     is_set $i
@@ -101,9 +100,6 @@
             STATUS_EC2="Failed"
             RETURN=1
         fi
-
-        # Clean up side effects
-        unset NOVA_VERSION
     fi
 fi
 
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index a8fbd86..9bcb766 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -235,7 +235,7 @@
     local NET_ID
     NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
-    neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
+    neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR
     neutron_debug_admin probe-create --device-owner compute $NET_ID
     source $TOP_DIR/openrc demo demo
 }
diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template
new file mode 100644
index 0000000..ab33c66
--- /dev/null
+++ b/files/apache-heat-api-cfn.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup heat-api-cfn
+    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    AllowEncodedSlashes On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory %HEAT_BIN_DIR%>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template
new file mode 100644
index 0000000..06c91bb
--- /dev/null
+++ b/files/apache-heat-api-cloudwatch.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup heat-api-cloudwatch
+    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    AllowEncodedSlashes On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory %HEAT_BIN_DIR%>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template
new file mode 100644
index 0000000..4924b39
--- /dev/null
+++ b/files/apache-heat-api.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup heat-api
+    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    AllowEncodedSlashes On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/heat-api.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory %HEAT_BIN_DIR%>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 4d3d2d6..f9fa265 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -2,6 +2,16 @@
 Listen %ADMINPORT%
 LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
 
+<Directory %KEYSTONE_BIN%>
+    <IfVersion >= 2.4>
+        Require all granted
+    </IfVersion>
+    <IfVersion < 2.4>
+        Order allow,deny
+        Allow from all
+    </IfVersion>
+</Directory>
+
 <VirtualHost *:%PUBLICPORT%>
     WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup keystone-public
@@ -16,16 +26,6 @@
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
-
-    <Directory %KEYSTONE_BIN%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
 </VirtualHost>
 
 <VirtualHost *:%ADMINPORT%>
@@ -42,19 +42,9 @@
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
-
-    <Directory %KEYSTONE_BIN%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
 </VirtualHost>
 
-Alias /identity %PUBLICWSGI%
+Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public
 <Location /identity>
     SetHandler wsgi-script
     Options +ExecCGI
@@ -64,7 +54,7 @@
     WSGIPassAuthorization On
 </Location>
 
-Alias /identity_admin %ADMINWSGI%
+Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
 <Location /identity_admin>
     SetHandler wsgi-script
     Options +ExecCGI
diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template
index 4908152..bcf406e 100644
--- a/files/apache-nova-api.template
+++ b/files/apache-nova-api.template
@@ -7,7 +7,7 @@
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
     <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
+      ErrorLogFormat "%M"
     </IfVersion>
     ErrorLog /var/log/%APACHE_NAME%/nova-api.log
     %SSLENGINE%
diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template
index 235d958..7b1d68b 100644
--- a/files/apache-nova-ec2-api.template
+++ b/files/apache-nova-ec2-api.template
@@ -7,10 +7,19 @@
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
     <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
+      ErrorLogFormat "%M"
     </IfVersion>
     ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
 </VirtualHost>
+
+Alias /ec2 %PUBLICWSGI%
+<Location /ec2>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup nova-ec2-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template
new file mode 100644
index 0000000..6231c1c
--- /dev/null
+++ b/files/apache-nova-metadata.template
@@ -0,0 +1,25 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess nova-metadata processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup nova-metadata
+    WSGIScriptAlias / %PUBLICWSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
+
+Alias /metadata %PUBLICWSGI%
+<Location /metadata>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup nova-metadata
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/debs/ceilometer-collector b/files/debs/ceilometer-collector
index f1b692a..d1e9eef 100644
--- a/files/debs/ceilometer-collector
+++ b/files/debs/ceilometer-collector
@@ -1,6 +1,3 @@
-python-pymongo #NOPRIME
-mongodb-server #NOPRIME
 libnspr4-dev
-pkg-config
-libxml2-dev
-libxslt-dev
\ No newline at end of file
+mongodb-server #NOPRIME
+python-pymongo #NOPRIME
diff --git a/files/debs/cinder b/files/debs/cinder
index 51908eb..48b8d0f 100644
--- a/files/debs/cinder
+++ b/files/debs/cinder
@@ -1,6 +1,6 @@
-tgt # NOPRIME
-lvm2
-qemu-utils
 libpq-dev
+lvm2
 open-iscsi
 open-iscsi-utils # Deprecated since quantal dist:precise
+qemu-utils
+tgt # NOPRIME
diff --git a/files/debs/devlibs b/files/debs/devlibs
deleted file mode 100644
index 0446ceb..0000000
--- a/files/debs/devlibs
+++ /dev/null
@@ -1,7 +0,0 @@
-libffi-dev  # pyOpenSSL
-libmysqlclient-dev  # MySQL-python
-libpq-dev  # psycopg2
-libssl-dev  # pyOpenSSL
-libxml2-dev  # lxml
-libxslt1-dev  # lxml
-python-dev  # pyOpenSSL
diff --git a/files/debs/general b/files/debs/general
index 1460526..1215147 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,27 +1,33 @@
+bc
 bridge-utils
-screen
-unzip
-wget
-psmisc
-gcc
+curl
 g++
+gcc
+gettext  # used for compiling message catalogs
 git
 graphviz # needed for docs
+iputils-ping
+libffi-dev # for pyOpenSSL
+libjpeg-dev # Pillow 3.0.0
+libmysqlclient-dev  # MySQL-python
+libpq-dev  # psycopg2
+libssl-dev # for pyOpenSSL
+libxml2-dev  # lxml
+libxslt1-dev  # lxml
+libyaml-dev
 lsof # useful when debugging
+openjdk-7-jre-headless  # NOPRIME
 openssh-server
 openssl
-iputils-ping
-wget
-curl
-tcpdump
-tar
-python-dev
-python2.7
-python-gdbm # needed for testr
-bc
-libyaml-dev
-libffi-dev
-libssl-dev # for pyOpenSSL
-gettext  # used for compiling message catalogs
-openjdk-7-jre-headless  # NOPRIME
 pkg-config
+psmisc
+python2.7
+python-dev
+python-gdbm # needed for testr
+screen
+tar
+tcpdump
+unzip
+wget
+wget
+zlib1g-dev
diff --git a/files/debs/glance b/files/debs/glance
deleted file mode 100644
index 37877a8..0000000
--- a/files/debs/glance
+++ /dev/null
@@ -1,6 +0,0 @@
-libmysqlclient-dev
-libpq-dev
-libssl-dev
-libxml2-dev
-libxslt1-dev
-zlib1g-dev
diff --git a/files/debs/ironic b/files/debs/ironic
index 0a906db..4d5a6aa 100644
--- a/files/debs/ironic
+++ b/files/debs/ironic
@@ -6,8 +6,8 @@
 libvirt-bin
 open-iscsi
 openssh-client
-openvswitch-switch
 openvswitch-datapath-dkms
+openvswitch-switch
 python-libguestfs
 python-libvirt
 qemu
diff --git a/files/debs/keystone b/files/debs/keystone
index 70a5649..0795167 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -1,7 +1,6 @@
-python-lxml
-sqlite3
-python-mysqldb
-python-mysql.connector
+libkrb5-dev
 libldap2-dev
 libsasl2-dev
-libkrb5-dev
+python-mysql.connector
+python-mysqldb
+sqlite3
diff --git a/files/debs/ldap b/files/debs/ldap
index 26f7aef..aa3a934 100644
--- a/files/debs/ldap
+++ b/files/debs/ldap
@@ -1,3 +1,3 @@
 ldap-utils
-slapd
 python-ldap
+slapd
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index ffc947a..0da57ee 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,8 +1,8 @@
-qemu-utils
+cryptsetup
+genisoimage
 lvm2 # NOPRIME
 open-iscsi
-genisoimage
-sysfsutils
-sg3-utils
 python-guestfs # NOPRIME
-cryptsetup
+qemu-utils
+sg3-utils
+sysfsutils
diff --git a/files/debs/n-novnc b/files/debs/n-novnc
deleted file mode 100644
index c8722b9..0000000
--- a/files/debs/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/debs/neutron b/files/debs/neutron
index b5a457e..85145d3 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -1,18 +1,18 @@
 acl
-ebtables
-iptables
-iputils-ping
-iputils-arping
-libmysqlclient-dev
-mysql-server #NOPRIME
-sudo
-postgresql-server-dev-all
-python-mysqldb
-python-mysql.connector
 dnsmasq-base
 dnsmasq-utils # for dhcp_release only available in dist:precise
+ebtables
+iptables
+iputils-arping
+iputils-ping
+libmysqlclient-dev
+mysql-server #NOPRIME
+postgresql-server-dev-all
+python-mysql.connector
+python-mysqldb
 rabbitmq-server # NOPRIME
-sqlite3
-vlan
 radvd # NOPRIME
+sqlite3
+sudo
 uuid-runtime
+vlan
diff --git a/files/debs/nova b/files/debs/nova
index 346b8b3..fe57fc4 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -1,31 +1,26 @@
+conntrack
+curl
 dnsmasq-base
 dnsmasq-utils # for dhcp_release
-conntrack
-kpartx
-parted
-iputils-arping
-libmysqlclient-dev
-mysql-server # NOPRIME
-python-mysqldb
-python-mysql.connector
-python-lxml # needed for glance which is needed for nova --- this shouldn't be here
-gawk
-iptables
 ebtables
-sqlite3
-sudo
-qemu-kvm # NOPRIME
-qemu # dist:wheezy,jessie NOPRIME
+gawk
+genisoimage # required for config_drive
+iptables
+iputils-arping
+kpartx
+libjs-jquery-tablesorter # Needed for coverage html reports
+libmysqlclient-dev
 libvirt-bin # NOPRIME
 libvirt-dev # NOPRIME
+mysql-server # NOPRIME
+parted
 pm-utils
-libjs-jquery-tablesorter # Needed for coverage html reports
-vlan
-curl
-genisoimage # required for config_drive
+python-mysql.connector
+python-mysqldb
+qemu # dist:wheezy,jessie NOPRIME
+qemu-kvm # NOPRIME
 rabbitmq-server # NOPRIME
 socat # used by ajaxterm
-python-libvirt # NOPRIME
-python-libxml2
-python-numpy # used by websockify for spice console
-python-m2crypto
+sqlite3
+sudo
+vlan
diff --git a/files/debs/tempest b/files/debs/tempest
deleted file mode 100644
index bb09529..0000000
--- a/files/debs/tempest
+++ /dev/null
@@ -1,2 +0,0 @@
-libxml2-dev
-libxslt1-dev
diff --git a/files/debs/trove b/files/debs/trove
deleted file mode 100644
index 96f8f29..0000000
--- a/files/debs/trove
+++ /dev/null
@@ -1 +0,0 @@
-libxslt1-dev
diff --git a/files/debs/zookeeper b/files/debs/zookeeper
new file mode 100644
index 0000000..f41b559
--- /dev/null
+++ b/files/debs/zookeeper
@@ -0,0 +1 @@
+zookeeperd
diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector
index 5e4dfcc..fc75ffa 100644
--- a/files/rpms-suse/ceilometer-collector
+++ b/files/rpms-suse/ceilometer-collector
@@ -1,3 +1,3 @@
-# Not available in openSUSE main repositories, but can be fetched from OBS
 # (devel:languages:python and server:database projects)
 mongodb
+# Not available in openSUSE main repositories, but can be fetched from OBS
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
index 8d46500..8c4955d 100644
--- a/files/rpms-suse/ceph
+++ b/files/rpms-suse/ceph
@@ -1,3 +1,3 @@
 ceph    # NOPRIME
-xfsprogs
 lsb
+xfsprogs
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
index 3fd03cc..56b1bb5 100644
--- a/files/rpms-suse/cinder
+++ b/files/rpms-suse/cinder
@@ -1,6 +1,6 @@
 lvm2
-tgt # NOPRIME
-qemu-tools
-python-devel
-postgresql-devel
 open-iscsi
+postgresql-devel
+python-devel
+qemu-tools
+tgt # NOPRIME
diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs
deleted file mode 100644
index 54d13a3..0000000
--- a/files/rpms-suse/devlibs
+++ /dev/null
@@ -1,6 +0,0 @@
-libffi-devel  # pyOpenSSL
-libopenssl-devel  # pyOpenSSL
-libxslt-devel  # lxml
-postgresql-devel  # psycopg2
-libmysqlclient-devel # MySQL-python
-python-devel  # pyOpenSSL
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 42756d8..34a2955 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -8,16 +8,23 @@
 git-core
 graphviz # docs
 iputils
+libffi-devel  # pyOpenSSL
+libjpeg8-devel # Pillow 3.0.0
+libmysqlclient-devel # MySQL-python
 libopenssl-devel # to rebuild pyOpenSSL if needed
+libxslt-devel  # lxml
 lsof # useful when debugging
 make
+net-tools
 openssh
 openssl
+postgresql-devel  # psycopg2
 psmisc
 python-cmd2 # dist:opensuse-12.3
+python-devel  # pyOpenSSL
 screen
 tar
 tcpdump
 unzip
 wget
-net-tools
+zlib-devel
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index 77f7c34..753ea76 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -1,2 +1,2 @@
-apache2  # NOPRIME
 apache2-mod_wsgi  # NOPRIME
+apache2  # NOPRIME
diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api
index 6f59e60..af5ac2f 100644
--- a/files/rpms-suse/n-api
+++ b/files/rpms-suse/n-api
@@ -1,2 +1,2 @@
-python-dateutil
 fping
+python-dateutil
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index b3a468d..29bd31b 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,7 +1,7 @@
-# Stuff for diablo volumes
+cryptsetup
 genisoimage
 lvm2
 open-iscsi
-sysfsutils
 sg3_utils
-cryptsetup
+# Stuff for diablo volumes
+sysfsutils
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index 1339799..4b0eefa 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -7,7 +7,7 @@
 mariadb # NOPRIME
 postgresql-devel
 rabbitmq-server # NOPRIME
+radvd # NOPRIME
 sqlite3
 sudo
 vlan
-radvd # NOPRIME
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 039456f..2f3ad21 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,7 +1,7 @@
+conntrack-tools
 curl
 dnsmasq
 dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
-conntrack-tools
 ebtables
 gawk
 genisoimage # required for config_drive
@@ -9,14 +9,14 @@
 iputils
 kpartx
 kvm # NOPRIME
-# qemu as fallback if kvm cannot be used
-qemu # NOPRIME
 libvirt # NOPRIME
 libvirt-python # NOPRIME
 mariadb # NOPRIME
 parted
 polkit
 python-devel
+# qemu as fallback if kvm cannot be used
+qemu # NOPRIME
 rabbitmq-server # NOPRIME
 socat
 sqlite3
diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch
index edfb4d2..53f8bb2 100644
--- a/files/rpms-suse/openvswitch
+++ b/files/rpms-suse/openvswitch
@@ -1,3 +1,3 @@
+
 openvswitch
 openvswitch-switch
-
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index b139ed2..a8b8118 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,3 +1,3 @@
-selinux-policy-targeted
-mongodb-server #NOPRIME
 mongodb # NOPRIME
+mongodb-server #NOPRIME
+selinux-policy-targeted
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 5483735..64befc5 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
 ceph    # NOPRIME
-xfsprogs
 redhat-lsb-core
+xfsprogs
diff --git a/files/rpms/cinder b/files/rpms/cinder
index a88503b..f28f04d 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,5 +1,5 @@
-lvm2
-scsi-target-utils # NOPRIME
-qemu-img
-postgresql-devel
 iscsi-initiator-utils
+lvm2
+postgresql-devel
+qemu-img
+scsi-target-utils # NOPRIME
diff --git a/files/rpms/devlibs b/files/rpms/devlibs
deleted file mode 100644
index 385ed3b..0000000
--- a/files/rpms/devlibs
+++ /dev/null
@@ -1,8 +0,0 @@
-libffi-devel  # pyOpenSSL
-libxml2-devel  # lxml
-libxslt-devel  # lxml
-mariadb-devel  # MySQL-python
-openssl-devel  # pyOpenSSL
-postgresql-devel  # psycopg2
-python-devel  # pyOpenSSL
-redhat-rpm-config # MySQL-python rhbz-1195207 f21
diff --git a/files/rpms/dstat b/files/rpms/dstat
index 8a8f8fe..2b643b8 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1 +1 @@
-dstat
\ No newline at end of file
+dstat
diff --git a/files/rpms/general b/files/rpms/general
index c3f3de8..40b06f4 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,31 +1,36 @@
+bc
 bridge-utils
 curl
 dbus
 euca2ools # only for testing client
 gcc
 gcc-c++
+gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
+iptables-services  # NOPRIME f21,f22
+java-1.7.0-openjdk-headless  # NOPRIME rhel7
+java-1.8.0-openjdk-headless  # NOPRIME f21,f22
+libffi-devel
+libjpeg-turbo-devel # Pillow 3.0.0
+libxml2-devel # lxml
+libxslt-devel # lxml
+libyaml-devel
+mariadb-devel  # MySQL-python
+net-tools
 openssh-server
 openssl
 openssl-devel # to rebuild pyOpenSSL if needed
-libffi-devel
-libxml2-devel
-libxslt-devel
 pkgconfig
+postgresql-devel  # psycopg2
 psmisc
+pyOpenSSL # version in pip uses too much memory
 python-devel
+redhat-rpm-config # MySQL-python rhbz-1195207 f21
 screen
 tar
 tcpdump
 unzip
 wget
 which
-bc
-libyaml-devel
-gettext  # used for compiling message catalogs
-net-tools
-java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f21,f22
-pyOpenSSL # version in pip uses too much memory
-iptables-services  # NOPRIME f21,f22
+zlib-devel
diff --git a/files/rpms/glance b/files/rpms/glance
deleted file mode 100644
index 479194f..0000000
--- a/files/rpms/glance
+++ /dev/null
@@ -1,6 +0,0 @@
-libxml2-devel
-libxslt-devel
-mysql-devel
-openssl-devel
-postgresql-devel
-zlib-devel
diff --git a/files/rpms/horizon b/files/rpms/horizon
index b2cf0de..aeb2cb5 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -1,5 +1,5 @@
 Django
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
-pyxattr
 pcre-devel  # pyScss
+pyxattr
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 8074119..c01c261 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,4 +1,3 @@
-MySQL-python
-libxslt-devel
-sqlite
 mod_ssl
+MySQL-python
+sqlite
diff --git a/files/rpms/ldap b/files/rpms/ldap
index d89c4cf..d5b8fa4 100644
--- a/files/rpms/ldap
+++ b/files/rpms/ldap
@@ -1,2 +1,2 @@
-openldap-servers
 openldap-clients
+openldap-servers
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 81278b3..7773b04 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,7 +1,7 @@
-# Stuff for diablo volumes
+cryptsetup
+genisoimage
 iscsi-initiator-utils
 lvm2
-genisoimage
-sysfsutils
 sg3_utils
-cryptsetup
+# Stuff for diablo volumes
+sysfsutils
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 29851be..b3f79ed 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -1,4 +1,3 @@
-MySQL-python
 acl
 dnsmasq # for q-dhcp
 dnsmasq-utils # for dhcp_release
@@ -7,10 +6,11 @@
 iputils
 mysql-connector-python
 mysql-devel
+MySQL-python
 mysql-server # NOPRIME
 openvswitch # NOPRIME
 postgresql-devel
 rabbitmq-server # NOPRIME
+radvd # NOPRIME
 sqlite
 sudo
-radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index 6eeb623..00e7596 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,27 +1,28 @@
-MySQL-python
+conntrack-tools
 curl
 dnsmasq # for nova-network
 dnsmasq-utils # for dhcp_release
-conntrack-tools
 ebtables
 gawk
 genisoimage # required for config_drive
 iptables
 iputils
+kernel-modules # dist:f21,f22,f23
 kpartx
 kvm # NOPRIME
-qemu-kvm # NOPRIME
 libvirt-bin # NOPRIME
 libvirt-devel # NOPRIME
 libvirt-python # NOPRIME
 libxml2-python
-numpy # needed by websockify for spice console
 m2crypto
 mysql-connector-python
 mysql-devel
+MySQL-python
 mysql-server # NOPRIME
+numpy # needed by websockify for spice console
 parted
 polkit
+qemu-kvm # NOPRIME
 rabbitmq-server # NOPRIME
 sqlite
 sudo
diff --git a/files/rpms/swift b/files/rpms/swift
index 1bf57cc..f56a81b 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,7 +1,7 @@
 curl
 memcached
 pyxattr
+rsync-daemon # dist:f22,f23
 sqlite
 xfsprogs
 xinetd
-rsync-daemon # dist:f22,f23
diff --git a/files/rpms/tempest b/files/rpms/tempest
deleted file mode 100644
index e7bbd43..0000000
--- a/files/rpms/tempest
+++ /dev/null
@@ -1 +0,0 @@
-libxslt-devel
diff --git a/files/rpms/trove b/files/rpms/trove
deleted file mode 100644
index e7bbd43..0000000
--- a/files/rpms/trove
+++ /dev/null
@@ -1 +0,0 @@
-libxslt-devel
diff --git a/files/rpms/zookeeper b/files/rpms/zookeeper
new file mode 100644
index 0000000..1bfac53
--- /dev/null
+++ b/files/rpms/zookeeper
@@ -0,0 +1 @@
+zookeeper
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
deleted file mode 100644
index b9a55b4..0000000
--- a/files/venv-requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# Once we can prebuild wheels before a devstack run, uncomment the skipped libraries
-cryptography
-# lxml # still install from from packages
-# netifaces # still install from packages
-#numpy    # slowest wheel by far, stop building until we are actually using the output
-posix-ipc
-# psycopg # still install from packages
-pycrypto
-pyOpenSSL
-PyYAML
-xattr
diff --git a/files/zookeeper/environment b/files/zookeeper/environment
new file mode 100644
index 0000000..afa2d2f
--- /dev/null
+++ b/files/zookeeper/environment
@@ -0,0 +1,36 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Modified from http://packages.ubuntu.com/saucy/zookeeperd
+NAME=zookeeper
+ZOOCFGDIR=/etc/zookeeper/conf
+
+# It seems that log4j requires the log4j.properties file to be in the classpath
+CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"
+
+ZOOCFG="$ZOOCFGDIR/zoo.cfg"
+ZOO_LOG_DIR=/var/log/zookeeper
+USER=$NAME
+GROUP=$NAME
+PIDDIR=/var/run/$NAME
+PIDFILE=$PIDDIR/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+JAVA=/usr/bin/java
+ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
+JMXLOCALONLY=false
+JAVA_OPTS=""
diff --git a/files/zookeeper/log4j.properties b/files/zookeeper/log4j.properties
new file mode 100644
index 0000000..6c45a4a
--- /dev/null
+++ b/files/zookeeper/log4j.properties
@@ -0,0 +1,69 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# From http://packages.ubuntu.com/saucy/zookeeperd
+
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+log4j.rootLogger=${zookeeper.root.logger}
+
+# Example: console appender only
+# log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=WARN
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/files/zookeeper/myid b/files/zookeeper/myid
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/files/zookeeper/myid
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/files/zookeeper/zoo.cfg b/files/zookeeper/zoo.cfg
new file mode 100644
index 0000000..b8f5582
--- /dev/null
+++ b/files/zookeeper/zoo.cfg
@@ -0,0 +1,74 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
+
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+dataDir=/var/lib/zookeeper
+# Place the dataLogDir to a separate physical disc for better performance
+# dataLogDir=/disk2/zookeeper
+
+# the port at which the clients will connect
+clientPort=2181
+
+# Maximum number of connections that can be made from a single client
+maxClientCnxns=60
+
+# specify all zookeeper servers
+# The first port is used by followers to connect to the leader
+# The second one is used for leader election
+
+server.0=127.0.0.1:2888:3888
+
+# To avoid seeks ZooKeeper allocates space in the transaction log file in
+# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
+# for changing the size of the blocks is to reduce the block size if snapshots
+# are taken more often. (Also, see snapCount).
+#preAllocSize=65536
+
+# Clients can submit requests faster than ZooKeeper can process them,
+# especially if there are a lot of clients. To prevent ZooKeeper from running
+# out of memory due to queued requests, ZooKeeper will throttle clients so that
+# there is no more than globalOutstandingLimit outstanding requests in the
+# system. The default limit is 1,000. ZooKeeper logs transactions to a
+# transaction log. After snapCount transactions are written to a log file a
+# snapshot is started and a new transaction log file is started. The default
+# snapCount is 10,000.
+#snapCount=1000
+
+# If this option is defined, requests will be logged to a trace file named
+# traceFile.year.month.day.
+#traceFile=
+
+# Leader accepts client connections. Default value is "yes". The leader machine
+# coordinates updates. For higher update throughput at the slight expense of
+# read throughput the leader can be configured to not accept clients and focus
+# on coordination.
+#leaderServes=yes
+
+# Autopurge every hour to avoid using lots of disk in bursts
+# Order of the next 2 properties matters.
+# autopurge.snapRetainCount must be before autopurge.purgeInterval.
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=1
\ No newline at end of file
diff --git a/functions b/functions
index ca5955e..34da1ba 100644
--- a/functions
+++ b/functions
@@ -410,7 +410,7 @@
     ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
     if [[ $ip = "" ]];then
         echo "$nova_result"
-        die $LINENO "[Fail] Coudn't get ipaddress of VM"
+        die $LINENO "[Fail] Couldn't get ipaddress of VM"
     fi
     echo $ip
 }
diff --git a/functions-common b/functions-common
index 42555a9..2fcd9f8 100644
--- a/functions-common
+++ b/functions-common
@@ -76,37 +76,36 @@
     # The location is a variable to allow for easier refactoring later to make it
     # overridable. There is currently no usecase where doing so makes sense, so
     # it's not currently configurable.
-    for clouds_path in /etc/openstack ~/.config/openstack ; do
-        CLOUDS_YAML=$clouds_path/clouds.yaml
 
-        sudo mkdir -p $(dirname $CLOUDS_YAML)
-        sudo chown -R $STACK_USER $(dirname $CLOUDS_YAML)
+    CLOUDS_YAML=/etc/openstack/clouds.yaml
 
-        CA_CERT_ARG=''
-        if [ -f "$SSL_BUNDLE_FILE" ]; then
-            CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
-        fi
-        $TOP_DIR/tools/update_clouds_yaml.py \
-            --file $CLOUDS_YAML \
-            --os-cloud devstack \
-            --os-region-name $REGION_NAME \
-            --os-identity-api-version 3 \
-            $CA_CERT_ARG \
-            --os-auth-url $KEYSTONE_AUTH_URI \
-            --os-username demo \
-            --os-password $ADMIN_PASSWORD \
-            --os-project-name demo
-        $TOP_DIR/tools/update_clouds_yaml.py \
-            --file $CLOUDS_YAML \
-            --os-cloud devstack-admin \
-            --os-region-name $REGION_NAME \
-            --os-identity-api-version 3 \
-            $CA_CERT_ARG \
-            --os-auth-url $KEYSTONE_AUTH_URI \
-            --os-username admin \
-            --os-password $ADMIN_PASSWORD \
-            --os-project-name admin
-    done
+    sudo mkdir -p $(dirname $CLOUDS_YAML)
+    sudo chown -R $STACK_USER /etc/openstack
+
+    CA_CERT_ARG=''
+    if [ -f "$SSL_BUNDLE_FILE" ]; then
+        CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE"
+    fi
+    $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack \
+        --os-region-name $REGION_NAME \
+        --os-identity-api-version 3 \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_AUTH_URI \
+        --os-username demo \
+        --os-password $ADMIN_PASSWORD \
+        --os-project-name demo
+    $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-admin \
+        --os-region-name $REGION_NAME \
+        --os-identity-api-version 3 \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_AUTH_URI \
+        --os-username admin \
+        --os-password $ADMIN_PASSWORD \
+        --os-project-name admin
 }
 
 # trueorfalse <True|False> <VAR>
@@ -1037,7 +1036,7 @@
                 # We are using BASH regexp matching feature.
                 package=${BASH_REMATCH[1]}
                 distros=${BASH_REMATCH[2]}
-                # In bash ${VAR,,} will lowecase VAR
+                # In bash ${VAR,,} will lowercase VAR
                 # Look for a match in the distro list
                 if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
                     # If no match then skip this package
@@ -1073,6 +1072,10 @@
     local file_to_parse=""
     local service=""
 
+    if [ $# -ne 1 ]; then
+        die $LINENO "get_packages takes a single, comma-separated argument"
+    fi
+
     if [[ -z "$package_dir" ]]; then
         echo "No package directory supplied"
         return 1
@@ -1161,7 +1164,7 @@
 
     if is_ubuntu; then
         local xtrace
-    xtrace=$(set +o | grep xtrace)
+        xtrace=$(set +o | grep xtrace)
         set +o xtrace
         if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then
             # if there are transient errors pulling the updates, that's fine.
@@ -1349,6 +1352,7 @@
 # If the command includes shell metachatacters (;<>*) it must be run using a shell
 # If an optional group is provided sg will be used to run the
 # command as that group.
+# Uses globals ``USE_SCREEN``
 # run_process service "command-line" [group]
 function run_process {
     local service=$1
@@ -1367,7 +1371,7 @@
 
 # Helper to launch a process in a named screen
 # Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``,
-# ``SERVICE_DIR``, ``USE_SCREEN``
+# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING``
 # screen_process name "command-line" [group]
 # Run a command in a shell in a screen window, if an optional group
 # is provided, use sg to set the group of the command.
@@ -1378,7 +1382,6 @@
 
     SCREEN_NAME=${SCREEN_NAME:-stack}
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
 
     screen -S $SCREEN_NAME -X screen -t $name
 
@@ -1387,8 +1390,12 @@
     echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
     echo "log: $real_logfile"
     if [[ -n ${LOGDIR} ]]; then
-        screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
-        screen -S $SCREEN_NAME -p $name -X log on
+        if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
+            screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
+            screen -S $SCREEN_NAME -p $name -X log on
+        fi
+        # If logging isn't active then avoid a broken symlink
+        touch "$real_logfile"
         ln -sf "$real_logfile" ${LOGDIR}/${name}.log
         if [[ -n ${SCREEN_LOGDIR} ]]; then
             # Drop the backward-compat symlink
@@ -1427,7 +1434,7 @@
 }
 
 # Screen rc file builder
-# Uses globals ``SCREEN_NAME``, ``SCREENRC``
+# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING``
 # screen_rc service "command-line"
 function screen_rc {
     SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -1447,7 +1454,7 @@
         echo "screen -t $1 bash" >> $SCREENRC
         echo "stuff \"$2$NL\"" >> $SCREENRC
 
-        if [[ -n ${LOGDIR} ]]; then
+        if [[ -n ${LOGDIR} ]] && [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
             echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC
             echo "log on" >>$SCREENRC
         fi
@@ -1458,14 +1465,13 @@
 # If a PID is available use it, kill the whole process group via TERM
 # If screen is being used kill the screen window; this will catch processes
 # that did not leave a PID behind
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
+# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
 # screen_stop_service service
 function screen_stop_service {
     local service=$1
 
     SCREEN_NAME=${SCREEN_NAME:-stack}
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
 
     if is_service_enabled $service; then
         # Clean up the screen window
@@ -1483,7 +1489,6 @@
     local service=$1
 
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
 
     if is_service_enabled $service; then
         # Kill via pid if we have one available
@@ -1502,7 +1507,7 @@
                 # this fixed in all services:
                 # https://bugs.launchpad.net/oslo-incubator/+bug/1446583
                 sleep 1
-                # /bin/true becakse pkill on a non existant process returns an error
+                # /bin/true because pkill on a non existent process returns an error
                 pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true
             fi
             rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
@@ -1545,11 +1550,11 @@
 }
 
 # Tail a log file in a screen if USE_SCREEN is true.
+# Uses globals ``USE_SCREEN``
 function tail_log {
     local name=$1
     local logfile=$2
 
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
     if [[ "$USE_SCREEN" = "True" ]]; then
         screen_process "$name" "sudo tail -f $logfile"
     fi
@@ -1710,7 +1715,7 @@
         if [[ -f $dir/devstack/override-defaults ]]; then
             # be really verbose that an override is happening, as it
             # may not be obvious if things fail later.
-            echo "$plugin has overriden the following defaults"
+            echo "$plugin has overridden the following defaults"
             cat $dir/devstack/override-defaults
             source $dir/devstack/override-defaults
         fi
@@ -1746,7 +1751,8 @@
             # extras.d in an unsupported way which will let us track
             # unsupported usage in the gate.
             local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh"
-            local extra=$(basename $extra_plugin_file_name)
+            local extra
+            extra=$(basename $extra_plugin_file_name)
             if [[ ! ( $exceptions =~ "$extra" ) ]]; then
                 deprecated "extras.d support is being removed in Mitaka-1"
                 deprecated "jobs for project $extra will break after that point"
@@ -1772,11 +1778,17 @@
 # remove extra commas from the input string (i.e. ``ENABLED_SERVICES``)
 # _cleanup_service_list service-list
 function _cleanup_service_list {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     echo "$1" | sed -e '
         s/,,/,/g;
         s/^,//;
         s/,$//
     '
+
+    $xtrace
 }
 
 # disable_all_services() removes all current services
@@ -1794,6 +1806,10 @@
 # Uses global ``ENABLED_SERVICES``
 # disable_negated_services
 function disable_negated_services {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local to_remove=""
     local remaining=""
     local service
@@ -1811,6 +1827,8 @@
     # go through the service list.  if this service appears in the "to
     # be removed" list, drop it
     ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove")
+
+    $xtrace
 }
 
 # disable_service() prepares the services passed as argument to be
@@ -1822,6 +1840,10 @@
 # Uses global ``DISABLED_SERVICES``
 # disable_service service [service ...]
 function disable_service {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local disabled_svcs="${DISABLED_SERVICES}"
     local enabled_svcs=",${ENABLED_SERVICES},"
     local service
@@ -1833,6 +1855,8 @@
     done
     DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs")
     ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs")
+
+    $xtrace
 }
 
 # enable_service() adds the services passed as argument to the
@@ -1846,6 +1870,10 @@
 # Uses global ``ENABLED_SERVICES``
 # enable_service service [service ...]
 function enable_service {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local tmpsvcs="${ENABLED_SERVICES}"
     local service
     for service in $@; do
@@ -1859,6 +1887,8 @@
     done
     ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
     disable_negated_services
+
+    $xtrace
 }
 
 # is_service_enabled() checks if the service(s) specified as arguments are
@@ -1887,6 +1917,7 @@
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
+
     local enabled=1
     local services=$@
     local service
@@ -1912,6 +1943,7 @@
         [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
         [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
     done
+
     $xtrace
     return $enabled
 }
@@ -1919,6 +1951,10 @@
 # remove specified list from the input string
 # remove_disabled_services service-list remove-list
 function remove_disabled_services {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local service_list=$1
     local remove_list=$2
     local service
@@ -1937,6 +1973,9 @@
             enabled="${enabled},$service"
         fi
     done
+
+    $xtrace
+
     _cleanup_service_list "$enabled"
 }
 
@@ -2184,14 +2223,21 @@
 # global counter for that name. Errors if that clock had not
 # previously been started.
 function time_stop {
-    local name=$1
-    local start_time=${START_TIME[$name]}
+    local name
+    local end_time
+    local elapsed_time
+    local total
+    local start_time
+
+    name=$1
+    start_time=${START_TIME[$name]}
+
     if [[ -z "$start_time" ]]; then
         die $LINENO "Trying to stop the clock on $name, but it was never started"
     fi
-    local end_time=$(date +%s)
-    local elapsed_time=$(($end_time - $start_time))
-    local total=${TOTAL_TIME[$name]:-0}
+    end_time=$(date +%s)
+    elapsed_time=$(($end_time - $start_time))
+    total=${TOTAL_TIME[$name]:-0}
     # reset the clock so we can start it in the future
     START_TIME[$name]=""
     TOTAL_TIME[$name]=$(($total + $elapsed_time))
diff --git a/inc/meta-config b/inc/meta-config
index d74db59..b9ab6b2 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -92,7 +92,7 @@
     local real_configfile
     real_configfile=$(eval echo $configfile)
     if [ ! -f $real_configfile ]; then
-        touch $real_configfile
+        touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)"
     fi
 
     get_meta_section $file $matchgroup $configfile | \
@@ -178,8 +178,18 @@
     local configfile group
     for group in $matchgroups; do
         for configfile in $(get_meta_section_files $localfile $group); do
-            if [[ -d $(dirname $(eval "echo $configfile")) ]]; then
+            local realconfigfile
+            local dir
+
+            realconfigfile=$(eval "echo $configfile")
+            if [[ -z $realconfigfile ]]; then
+                die $LINENO "bogus config file specification: $configfile is undefined"
+            fi
+            dir=$(dirname $realconfigfile)
+            if [[ -d $dir ]]; then
                 merge_config_file $localfile $group $configfile
+            else
+                die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)"
             fi
         done
     done
diff --git a/lib/cinder b/lib/cinder
index 2c9c94a..1307c11 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -299,7 +299,7 @@
     fi
 
     if is_service_enabled swift; then
-        iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_"
+        iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
     fi
 
     if is_service_enabled ceilometer; then
@@ -317,9 +317,7 @@
 
     iniset_rpc_backend cinder $CINDER_CONF
 
-    if [[ "$CINDER_VOLUME_CLEAR" == "none" ]] || [[ "$CINDER_VOLUME_CLEAR" == "zero" ]] || [[ "$CINDER_VOLUME_CLEAR" == "shred" ]]; then
-        iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR
-    fi
+    iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR
 
     # Format logging
     if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
diff --git a/lib/dlm b/lib/dlm
new file mode 100644
index 0000000..f68ee26
--- /dev/null
+++ b/lib/dlm
@@ -0,0 +1,108 @@
+#!/bin/bash
+#
+# lib/dlm
+#
+# Functions to control the installation and configuration of software
+# that provides a dlm (and possibly other functions). The default is
+# **zookeeper**, and is going to be the only backend supported in the
+# devstack tree.
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_dlm_enabled
+# - install_dlm
+# - configure_dlm
+# - cleanup_dlm
+
+# Save trace setting
+XTRACE=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper
+ZOOKEEPER_CONF_DIR=/etc/zookeeper
+
+
+# Entry Points
+# ------------
+#
+# NOTE(sdague): it is expected that when someone wants to implement
+# another one of these out of tree, they'll implement the following
+# functions:
+#
+# - dlm_backend
+# - install_dlm
+# - configure_dlm
+# - cleanup_dlm
+
+# This should be declared in the settings file of any plugin or
+# service that needs to have a dlm in its environment.
+function use_dlm {
+    enable_service $(dlm_backend)
+}
+
+# A function to return the name of the backend in question; some users
+# are going to need to know this.
+function dlm_backend {
+    echo "zookeeper"
+}
+
+# Test if a dlm is enabled (defaults to a zookeeper specific check)
+function is_dlm_enabled {
+    [[ ,${ENABLED_SERVICES}, =~ ,"$(dlm_backend)", ]] && return 0
+    return 1
+}
+
+# cleanup_dlm() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_dlm {
+    # NOTE(sdague): we don't check for is_enabled here because we
+    # should just delete this regardless. Sometimes users update
+    # their service list before they run cleanup.
+    sudo rm -rf $ZOOKEEPER_DATA_DIR
+}
+
+# configure_dlm() - Set config files, create data dirs, etc
+function configure_dlm {
+    if is_dlm_enabled; then
+        sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR
+        sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg
+        # clean up from previous (possibly aborted) runs
+        # create required data files
+        sudo rm -rf $ZOOKEEPER_DATA_DIR
+        sudo mkdir -p $ZOOKEEPER_DATA_DIR
+        # restart after configuration; there is no reason to make this
+        # a separate step, because having data files that don't match the
+        # running zookeeper is just going to cause tears.
+        restart_service zookeeper
+    fi
+}
+
+# install_dlm() - Collect source and prepare
+function install_dlm {
+    if is_dlm_enabled; then
+        if is_ubuntu; then
+            install_package zookeeperd
+        else
+            die $LINENO "Don't know how to install zookeeper on this platform"
+        fi
+    fi
+}
+
+# Restore xtrace
+$XTRACE
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
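The entry points documented at the top of ``lib/dlm`` are the contract an out-of-tree backend would implement. A minimal sketch of such an override, assuming a hypothetical etcd-based plugin (etcd here is only an example; zookeeper is the sole in-tree backend):

::

    # in the plugin's override of lib/dlm's entry points
    function dlm_backend {
        echo "etcd"
    }

    function install_dlm {
        if is_dlm_enabled; then
            install_package etcd
        fi
    }

    function configure_dlm {
        if is_dlm_enabled; then
            sudo mkdir -p /var/lib/etcd
            restart_service etcd
        fi
    }

    function cleanup_dlm {
        sudo rm -rf /var/lib/etcd
    }

A service that wants a dlm then only calls ``use_dlm`` from its settings file, and the selected backend is enabled by name.
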
diff --git a/lib/heat b/lib/heat
index 615198c..e42bdf0 100644
--- a/lib/heat
+++ b/lib/heat
@@ -16,6 +16,7 @@
 # - install_heat
 # - configure_heatclient
 # - configure_heat
+# - _config_heat_apache_wsgi
 # - init_heat
 # - start_heat
 # - stop_heat
@@ -32,6 +33,9 @@
 # set up default directories
 GITDIR["python-heatclient"]=$DEST/python-heatclient
 
+# Toggle for deploying Heat-API under HTTPD + mod_wsgi
+HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False}
+
 HEAT_DIR=$DEST/heat
 HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
 HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
@@ -65,6 +69,8 @@
     HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
     HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
 fi
+HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins}
+ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-}
 
 # Functions
 # ---------
@@ -117,13 +123,17 @@
     # logging
     iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ]  ; then
         # Add color to logging output
         setup_colorized_logging $HEAT_CONF DEFAULT tenant user
     fi
 
     iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH
 
+    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        _config_heat_apache_wsgi
+    fi
+
     # NOTE(jamielennox): heat re-uses specific values from the
     # keystone_authtoken middleware group and so currently fails when using the
     # auth plugin setup. This should be fixed in heat.  Heat is also the only
@@ -180,6 +190,35 @@
     # copy the default templates
     cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/
 
+    # Enable heat plugins.
+    # NOTE(nic): The symlink nonsense is necessary because when
+    # plugins are installed in "developer mode", the final component
+    # of their target directory is always "resources", which confuses
+    # Heat's plugin loader into believing that all plugins are named
+    # "resources", and therefore are all the same plugin; so it
+    # will only load one of them.  Linking them all to a common
+    # location with unique names avoids that type of collision,
+    # while still allowing the plugins to be edited in-tree.
+    local err_count=0
+
+    if [ -n "$ENABLE_HEAT_PLUGINS" ]; then
+        mkdir -p $HEAT_PLUGIN_DIR
+        # Clean up cruft from any previous runs
+        rm -f $HEAT_PLUGIN_DIR/*
+        iniset $HEAT_CONF DEFAULT plugin_dirs $HEAT_PLUGIN_DIR
+    fi
+
+    for heat_plugin in $ENABLE_HEAT_PLUGINS; do
+        if [ -d $HEAT_DIR/contrib/$heat_plugin ]; then
+            setup_package $HEAT_DIR/contrib/$heat_plugin -e
+            ln -s $HEAT_DIR/contrib/$heat_plugin/$heat_plugin/resources $HEAT_PLUGIN_DIR/$heat_plugin
+        else
+            : # clear retval on the test so that we can roll up errors
+            err $LINENO "Requested Heat plugin(${heat_plugin}) not found."
+            err_count=$(($err_count + 1))
+        fi
+    done
+    [ $err_count -eq 0 ] || die $LINENO "$err_count of the requested Heat plugins could not be installed."
 }
 
 # init_heat() - Initialize database
@@ -211,6 +250,9 @@
 function install_heat {
     git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
     setup_develop $HEAT_DIR
+    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        install_apache_wsgi
+    fi
 }
 
 # install_heat_other() - Collect source and prepare
@@ -226,20 +268,106 @@
 # start_heat() - Start running processes, including screen
 function start_heat {
     run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF"
-    run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
-    run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
-    run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
+
+    # If the site is not enabled then we are in a grenade scenario
+    local enabled_site_file
+    enabled_site_file=$(apache_site_config_for heat-api)
+    if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        enable_apache_site heat-api
+        enable_apache_site heat-api-cfn
+        enable_apache_site heat-api-cloudwatch
+        restart_apache_server
+        tail_log heat-api /var/log/$APACHE_NAME/heat-api.log
+        tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log
+        tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log
+    else
+        run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
+        run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
+        run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
+    fi
 }
 
 # stop_heat() - Stop running processes
 function stop_heat {
     # Kill the screen windows
-    local serv
-    for serv in h-eng h-api h-api-cfn h-api-cw; do
-        stop_process $serv
-    done
+    stop_process h-eng
+
+    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        disable_apache_site heat-api
+        disable_apache_site heat-api-cfn
+        disable_apache_site heat-api-cloudwatch
+        restart_apache_server
+    else
+        local serv
+        for serv in h-api h-api-cfn h-api-cw; do
+            stop_process $serv
+        done
+    fi
+
 }
 
+# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_heat_apache_wsgi {
+    sudo rm -f $(apache_site_config_for heat-api)
+    sudo rm -f $(apache_site_config_for heat-api-cfn)
+    sudo rm -f $(apache_site_config_for heat-api-cloudwatch)
+}
+
+# _config_heat_apache_wsgi() - Set WSGI config files of Heat
+function _config_heat_apache_wsgi {
+
+    local heat_apache_conf
+    heat_apache_conf=$(apache_site_config_for heat-api)
+    local heat_cfn_apache_conf
+    heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn)
+    local heat_cloudwatch_apache_conf
+    heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch)
+    local heat_ssl=""
+    local heat_certfile=""
+    local heat_keyfile=""
+    local heat_api_port=$HEAT_API_PORT
+    local heat_cfn_api_port=$HEAT_API_CFN_PORT
+    local heat_cw_api_port=$HEAT_API_CW_PORT
+    local venv_path=""
+
+    sudo cp $FILES/apache-heat-api.template $heat_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$heat_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+        s|%SSLENGINE%|$heat_ssl|g;
+        s|%SSLCERTFILE%|$heat_certfile|g;
+        s|%SSLKEYFILE%|$heat_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $heat_apache_conf
+
+    sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$heat_cfn_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+        s|%SSLENGINE%|$heat_ssl|g;
+        s|%SSLCERTFILE%|$heat_certfile|g;
+        s|%SSLKEYFILE%|$heat_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $heat_cfn_apache_conf
+
+    sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$heat_cw_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+        s|%SSLENGINE%|$heat_ssl|g;
+        s|%SSLCERTFILE%|$heat_certfile|g;
+        s|%SSLKEYFILE%|$heat_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $heat_cloudwatch_apache_conf
+}
+
+
 # create_heat_accounts() - Set up common required heat accounts
 function create_heat_accounts {
     if [[ "$HEAT_STANDALONE" != "True" ]]; then
diff --git a/lib/horizon b/lib/horizon
index 6ecd755..ff63b06 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -99,13 +99,8 @@
 
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
 
-    if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
-        # Only Identity v3 API is available; then use it with v3 auth tokens
-        _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
-        _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\""
-    else
-        _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
-    fi
+    _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
+    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\""
 
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
diff --git a/lib/ironic b/lib/ironic
index 74e2f93..016e639 100644
--- a/lib/ironic
+++ b/lib/ironic
@@ -516,13 +516,8 @@
     # intentional sleep to make sure the tag has been set to port
     sleep 10
 
-    if  [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-        local tapdev
-        tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
-    else
-        local tapdev
-        tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
-    fi
+    local tapdev
+    tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
     local tag_id
     tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
 
@@ -677,6 +672,8 @@
     # enable tftp natting for allowing connections to HOST_IP's tftp server
     sudo modprobe nf_conntrack_tftp
     sudo modprobe nf_nat_tftp
+    # explicitly allow DHCP - packets are occasionally being dropped here
+    sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true
     # nodes boot from TFTP and callback to the API server listening on $HOST_IP
     sudo iptables -I INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true
     sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
@@ -800,7 +797,7 @@
     # load them into glance
     IRONIC_DEPLOY_KERNEL_ID=$(openstack \
         --os-token $token \
-        --os-url http://$GLANCE_HOSTPORT \
+        --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
         image create \
         $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
         --public --disk-format=aki \
@@ -808,7 +805,7 @@
         < $IRONIC_DEPLOY_KERNEL_PATH  | grep ' id ' | get_field 2)
     IRONIC_DEPLOY_RAMDISK_ID=$(openstack \
         --os-token $token \
-        --os-url http://$GLANCE_HOSTPORT \
+        --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \
         image create \
         $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
         --public --disk-format=ari \
diff --git a/lib/keystone b/lib/keystone
index cdcc13a..5a2afbf 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -254,9 +254,9 @@
 
         # Add swift endpoints to service catalog if swift is enabled
         if is_service_enabled s-proxy; then
-            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
             echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
         fi
 
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 0968699..79c3140 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -144,8 +144,6 @@
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
 # Default auth strategy
 Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# Use namespace or not
-Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
 # RHEL's support for namespaces requires using veths with ovs
 Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
 Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
@@ -208,7 +206,7 @@
 # The plugin supports L3.
 Q_L3_ENABLED=${Q_L3_ENABLED:-False}
 # L3 routers exist per tenant
-Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False}
+Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
 
 # List of config file names in addition to the main plugin config file
 # See _configure_neutron_common() for details about setting it up
@@ -543,12 +541,12 @@
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
-            SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+            SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY --subnetpool None $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID"
         fi
 
         if [[ "$IP_VERSION" =~ .*6 ]]; then
-            SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2)
+            SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 --subnetpool_id None $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2)
             die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID"
         fi
 
@@ -800,7 +798,7 @@
         local IP_ADD=""
         local IP_DEL=""
         local DEFAULT_ROUTE_GW
-        DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
+        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }")
         local ADD_OVS_PORT=""
 
         if [[ $af == "inet" ]]; then
@@ -812,7 +810,7 @@
         fi
 
         if [ "$DEFAULT_ROUTE_GW" != "" ]; then
-            ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
+            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
         fi
 
         if [[ "$add_ovs_port" == "True" ]]; then
@@ -967,7 +965,6 @@
 
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
-    iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -984,7 +981,6 @@
 
     iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
     iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -1008,8 +1004,6 @@
 
 function _configure_neutron_l3_agent {
     Q_L3_ENABLED=True
-    # for l3-agent, only use per tenant router if we have namespaces
-    Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
 
     if is_service_enabled q-vpn; then
         neutron_vpn_configure_agent
@@ -1019,7 +1013,6 @@
 
     iniset $Q_L3_CONF_FILE DEFAULT verbose True
     iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -1242,6 +1235,7 @@
     subnet_params+="--ip_version 4 "
     subnet_params+="--gateway $NETWORK_GATEWAY "
     subnet_params+="--name $PRIVATE_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$NET_ID $FIXED_RANGE"
     local subnet_id
     subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
@@ -1258,6 +1252,7 @@
     subnet_params+="--ip_version 6 "
     subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
     subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
     local ipv6_subnet_id
     ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
@@ -1271,6 +1266,7 @@
     subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} "
     subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
     subnet_params+="--name $PUBLIC_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
     subnet_params+="-- --enable_dhcp=False"
     local id_and_ext_gw_ip
@@ -1284,6 +1280,7 @@
     local subnet_params="--ip_version 6 "
     subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY "
     subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
     subnet_params+="-- --enable_dhcp=False"
     local ipv6_id_and_ext_gw_ip
@@ -1308,7 +1305,7 @@
     if is_service_enabled q-l3; then
         # Configure and enable public bridge
         local ext_gw_interface="none"
-        if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+        if is_neutron_ovs_base_plugin; then
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
         elif [[ "$Q_AGENT" = "linuxbridge" ]]; then
             # Search for the brq device the neutron router and network for $FIXED_RANGE
@@ -1358,7 +1355,7 @@
         IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'`
         die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
-        if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+        if is_neutron_ovs_base_plugin; then
             local ext_gw_interface
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
             local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
@@ -1373,7 +1370,7 @@
 
 # Explicitly set router id in l3 agent configuration
 function _neutron_set_router_id {
-    if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
+    if [[ "$Q_L3_ROUTER_PER_TENANT" == "False" ]]; then
         iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
     fi
 }
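The subnet helpers above accumulate their arguments into ``subnet_params`` before a single client call; with the new flag the assembled v4 command looks roughly like the following (omitting the tenant argument set earlier in the function, with every value a placeholder from the surrounding variables):

::

    neutron subnet-create \
        --ip_version 4 \
        --gateway $NETWORK_GATEWAY \
        --name $PRIVATE_SUBNET_NAME \
        --subnetpool None \
        $NET_ID $FIXED_RANGE
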
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 4166131..f52105e 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -58,9 +58,9 @@
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then
-        iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver
+        iniset $conf_file DEFAULT interface_driver ivs
     else
-        iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+        iniset $conf_file DEFAULT interface_driver openvswitch
     fi
 }
 
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index 557b94d..953360e 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -68,7 +68,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver linuxbridge
 }
 
 function has_neutron_plugin_security_group {
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index 90dcd57..7d0cf1a 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -150,7 +150,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver openvswitch
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
deleted file mode 100644
index dd5cfa6..0000000
--- a/lib/neutron_plugins/ibm
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/bash
-#
-# Neutron IBM SDN-VE plugin
-# ---------------------------
-
-# Save trace setting
-IBM_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function neutron_plugin_install_agent_packages {
-    _neutron_ovs_base_install_agent_packages
-}
-
-function _neutron_interface_setup {
-    # Setup one interface on the integration bridge if needed
-    # The plugin agent to be used if more than one interface is used
-    local bridge=$1
-    local interface=$2
-    sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface
-}
-
-function neutron_setup_integration_bridge {
-    # Setup integration bridge if needed
-    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
-        neutron_ovs_base_cleanup
-        _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE
-        if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
-            interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ })
-            _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]}
-        fi
-    fi
-
-    # Set controller to SDNVE controller (1st of list) if exists
-    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
-        # Get the first controller
-        controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ })
-        SDNVE_IP=${controllers[0]}
-        sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP
-    fi
-}
-
-function neutron_plugin_create_nova_conf {
-    # if n-cpu is enabled, then setup integration bridge
-    if is_service_enabled n-cpu; then
-        neutron_setup_integration_bridge
-    fi
-}
-
-function is_neutron_ovs_base_plugin {
-    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
-        # Yes, we use OVS.
-        return 0
-    else
-        # No, we do not use OVS.
-        return 1
-    fi
-}
-
-function neutron_plugin_configure_common {
-    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
-    Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
-    Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
-}
-
-function neutron_plugin_configure_service {
-    # Define extra "SDNVE" configuration options when q-svc is configured
-
-    iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
-
-    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS
-    fi
-
-    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE
-    fi
-
-    if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE
-    fi
-
-    if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND
-    fi
-
-    if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS
-    fi
-
-    if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER
-    fi
-
-
-    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier
-
-}
-
-function neutron_plugin_configure_plugin_agent {
-    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent"
-}
-
-function neutron_plugin_configure_debug_command {
-    :
-}
-
-function neutron_plugin_setup_interface_driver {
-    return 0
-}
-
-function has_neutron_plugin_security_group {
-    # Does not support Security Groups
-    return 1
-}
-
-function neutron_ovs_base_cleanup {
-    if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then
-        # remove all OVS ports that look like Neutron created ports
-        for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
-            sudo ovs-vsctl del-port ${port}
-        done
-
-        # remove integration bridge created by Neutron
-        for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do
-            sudo ovs-vsctl del-br ${bridge}
-        done
-    fi
-}
-
-# Restore xtrace
-$IBM_XTRACE
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index bd4438d..f28bcfe 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -85,7 +85,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver linuxbridge
 }
 
 function neutron_plugin_check_adv_test_requirements {
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 48e47b3..5a843ff 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -110,7 +110,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver openvswitch
 }
 
 function neutron_plugin_check_adv_test_requirements {
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index 61a148e..3496da8 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # Neutron firewall plugin
 # ---------------------------
 
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 34190f9..7865f6f 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # Neutron loadbalancer plugin
 # ---------------------------
 
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 37ba019..c75ab19 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # Neutron metering plugin
 # ---------------------------
 
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 4d6a2bf..c0e7457 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -1,3 +1,5 @@
+#!/bin/bash
+
 # Neutron VPN plugin
 # ---------------------------
 
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 03853a9..e182fca 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -1,2 +1,4 @@
+#!/bin/bash
+
 # REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx'
 # continues to work.
diff --git a/lib/nova b/lib/nova
index 6e6075c..ba05f53 100644
--- a/lib/nova
+++ b/lib/nova
@@ -7,6 +7,7 @@
 #
 # - ``functions`` file
 # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``FILES``
 # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
 # - ``LIBVIRT_TYPE`` must be defined
 # - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
@@ -87,6 +88,7 @@
 NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
 EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}
+METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
 
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
@@ -241,6 +243,7 @@
     sudo rm -f $NOVA_WSGI_DIR/*
     sudo rm -f $(apache_site_config_for nova-api)
     sudo rm -f $(apache_site_config_for nova-ec2-api)
+    sudo rm -f $(apache_site_config_for nova-metadata)
 }
 
 # _config_nova_apache_wsgi() - Set WSGI config files of Keystone
@@ -251,11 +254,14 @@
     nova_apache_conf=$(apache_site_config_for nova-api)
     local nova_ec2_apache_conf
     nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+    local nova_metadata_apache_conf
+    nova_metadata_apache_conf=$(apache_site_config_for nova-metadata)
     local nova_ssl=""
     local nova_certfile=""
     local nova_keyfile=""
     local nova_api_port=$NOVA_SERVICE_PORT
     local nova_ec2_api_port=$EC2_SERVICE_PORT
+    local nova_metadata_port=$METADATA_SERVICE_PORT
     local venv_path=""
 
     if is_ssl_enabled_service nova-api; then
@@ -270,6 +276,7 @@
     # copy proxy vhost and wsgi helper files
     sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api
     sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py $NOVA_WSGI_DIR/nova-ec2-api
+    sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata
 
     sudo cp $FILES/apache-nova-api.template $nova_apache_conf
     sudo sed -e "
@@ -296,6 +303,19 @@
         s|%VIRTUALENV%|$venv_path|g
         s|%APIWORKERS%|$API_WORKERS|g
     " -i $nova_ec2_apache_conf
+
+    sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$nova_metadata_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-metadata|g;
+        s|%SSLENGINE%|$nova_ssl|g;
+        s|%SSLCERTFILE%|$nova_certfile|g;
+        s|%SSLKEYFILE%|$nova_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+        s|%APIWORKERS%|$API_WORKERS|g
+    " -i $nova_metadata_apache_conf
 }
 
 # configure_nova() - Set config files, create data dirs, etc
@@ -444,13 +464,18 @@
 
         # EC2
         if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-
+            local nova_ec2_api_url
+            if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then
+                nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:$EC2_SERVICE_PORT/"
+            else
+                nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST/ec2"
+            fi
             get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer"
             get_or_create_endpoint "ec2" \
                 "$REGION_NAME" \
-                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
-                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
-                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
+                "$nova_ec2_api_url" \
+                "$nova_ec2_api_url" \
+                "$nova_ec2_api_url"
         fi
     fi
 
@@ -475,7 +500,6 @@
 
     # (Re)create ``nova.conf``
     rm -f $NOVA_CONF
-    iniset $NOVA_CONF DEFAULT verbose "True"
     iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
     if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then
         iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
@@ -793,9 +817,11 @@
     if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
         enable_apache_site nova-api
         enable_apache_site nova-ec2-api
+        enable_apache_site nova-metadata
         restart_apache_server
         tail_log nova-api /var/log/$APACHE_NAME/nova-api.log
         tail_log nova-ec2-api /var/log/$APACHE_NAME/nova-ec2-api.log
+        tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log
     else
         run_process n-api "$NOVA_BIN_DIR/nova-api"
     fi
@@ -911,6 +937,7 @@
     if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
         disable_apache_site nova-api
         disable_apache_site nova-ec2-api
+        disable_apache_site nova-metadata
         restart_apache_server
     else
         stop_process n-api
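With the metadata vhost added above, serving the whole Nova API set under Apache remains a single toggle; a hedged ``local.conf`` sketch:

::

    [[local|localrc]]
    NOVA_USE_MOD_WSGI=True
    # nova-api, nova-ec2-api and now nova-metadata each get their own
    # vhost; the metadata one listens on METADATA_SERVICE_PORT (8775 by
    # default) and logs to /var/log/$APACHE_NAME/nova-metadata.log
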
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 5525cfd..78c5978 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -23,7 +23,7 @@
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
     if is_ubuntu; then
-        if is_arch "aarch64" && [[ ${DISTRO} =~ (trusty|utopic) ]]; then
+        if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then
             install_package qemu-system
         else
             install_package qemu-kvm
diff --git a/lib/oslo b/lib/oslo
index f64f327..56615fa 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -36,6 +36,7 @@
 GITDIR["oslo.messaging"]=$DEST/oslo.messaging
 GITDIR["oslo.middleware"]=$DEST/oslo.middleware
 GITDIR["oslo.policy"]=$DEST/oslo.policy
+GITDIR["oslo.privsep"]=$DEST/oslo.privsep
 GITDIR["oslo.reports"]=$DEST/oslo.reports
 GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
 GITDIR["oslo.serialization"]=$DEST/oslo.serialization
@@ -79,6 +80,7 @@
     _do_install_oslo_lib "oslo.messaging"
     _do_install_oslo_lib "oslo.middleware"
     _do_install_oslo_lib "oslo.policy"
+    _do_install_oslo_lib "oslo.privsep"
     _do_install_oslo_lib "oslo.reports"
     _do_install_oslo_lib "oslo.rootwrap"
     _do_install_oslo_lib "oslo.serialization"
diff --git a/lib/swift b/lib/swift
index 27832dc..ee0238d 100644
--- a/lib/swift
+++ b/lib/swift
@@ -44,6 +44,7 @@
 SWIFT3_DIR=$DEST/swift3
 
 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
 SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
 SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
 SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
@@ -62,7 +63,7 @@
 if is_service_enabled s-proxy && is_service_enabled swift3; then
     # If we are using ``swift3``, we can default the S3 port to swift instead
     # of nova-objectstore
-    S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+    S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT}
 fi
 
 if is_service_enabled g-api; then
@@ -183,7 +184,7 @@
 # _config_swift_apache_wsgi() - Set WSGI config files of Swift
 function _config_swift_apache_wsgi {
     sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR}
-    local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+    local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
 
     # copy proxy vhost and wsgi file
     sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template $(apache_site_config_for proxy-server)
@@ -347,7 +348,7 @@
     local csyncfile=${SWIFT_CONF_DIR}/container-sync-realms.conf
     cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${csyncfile}
     iniset ${csyncfile} realm1 key realm1key
-    iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/"
+    iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/"
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER}
@@ -368,7 +369,7 @@
     if is_service_enabled tls-proxy; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT}
     else
-        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT}
     fi
 
     if is_ssl_enabled_service s-proxy; then
@@ -625,9 +626,9 @@
         get_or_create_service "swift" "object-store" "Swift Service"
         get_or_create_endpoint "object-store" \
             "$REGION_NAME" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" \
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s"
     fi
 
     local swift_tenant_test1
@@ -775,7 +776,7 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
     done
     if is_service_enabled tls-proxy; then
-        local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+        local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
         start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT &
     fi
     run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
@@ -812,11 +813,13 @@
 }
 
 function swift_configure_tempurls {
+    # note we are using swift credentials!
     OS_USERNAME=swift \
-        OS_PROJECT_NAME=$SERVICE_TENANT_NAME \
-        OS_PASSWORD=$SERVICE_PASSWORD \
-        OS_AUTH_URL=$SERVICE_ENDPOINT \
-        swift post --auth-version 3 -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY"
+    OS_PASSWORD=$SERVICE_PASSWORD \
+    OS_PROJECT_NAME=$SERVICE_TENANT_NAME \
+    OS_AUTH_URL=$SERVICE_ENDPOINT \
+    openstack object store account \
+        set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
 }
 
 # Restore xtrace
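Since the hard-coded 8080 references now route through ``SWIFT_DEFAULT_BIND_PORT``, the proxy port becomes a single-point override; a hedged ``local.conf`` sketch (8888 is an arbitrary example):

::

    [[local|localrc]]
    # Keystone catalog entries, cinder's backup_swift_url and the swift3
    # S3 default all follow this setting automatically.
    SWIFT_DEFAULT_BIND_PORT=8888
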
diff --git a/lib/tempest b/lib/tempest
index 10dd652..32630db 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,7 +15,6 @@
 #   - ``SERVICE_HOST``
 #   - ``BASE_SQL_CONN`` ``lib/database`` declares
 #   - ``PUBLIC_NETWORK_NAME``
-#   - ``Q_USE_NAMESPACE``
 #   - ``Q_ROUTER_NAME``
 #   - ``Q_L3_ENABLED``
 #   - ``VIRT_DRIVER``
@@ -132,7 +131,6 @@
     local flavor_lines
     local public_network_id
     local public_router_id
-    local tenant_networks_reachable
     local boto_instance_type="m1.tiny"
     local ssh_connect_method="fixed"
 
@@ -246,13 +244,8 @@
         fi
     fi
 
-    if [ "$Q_USE_NAMESPACE" != "False" ]; then
-        tenant_networks_reachable=false
-        if ! is_service_enabled n-net; then
-            ssh_connect_method="floating"
-        fi
-    else
-        tenant_networks_reachable=true
+    if ! is_service_enabled n-net; then
+        ssh_connect_method="floating"
     fi
 
     ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
@@ -260,12 +253,6 @@
     if [ "$Q_L3_ENABLED" = "True" ]; then
         public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
             awk '{print $2}')
-        if [ "$Q_USE_NAMESPACE" == "False" ]; then
-            # If namespaces are disabled, DevStack will create a single
-            # public router that tempest should be configured to use.
-            public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \
-                { print \$2 }")
-        fi
     fi
 
     EC2_URL=$(get_endpoint_url ec2 public || true)
@@ -394,7 +381,7 @@
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
-    iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable"
+    iniset $TEMPEST_CONFIG network tenant_networks_reachable false
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
@@ -531,6 +518,8 @@
     if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
         iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
         iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+        iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
         iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
     fi
 
@@ -548,6 +537,12 @@
         fi
     done
 
+    if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
+        # libvirt-lxc does not support boot from volume or attaching volumes,
+        # so basically anything with cinder is out of the question.
+        iniset $TEMPEST_CONFIG service_available cinder "False"
+    fi
+
     if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
         # Use the ``BOTO_CONFIG`` environment variable to point to this file
         iniset -sudo $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE
@@ -564,9 +559,13 @@
         fi
         iniset $TEMPEST_CONFIG auth allow_tenant_isolation False
         iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
+    elif [[ $TEMPEST_HAS_ADMIN == "False" ]]; then
+        iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
+
     else
         iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
     fi
+
     # Restore IFS
     IFS=$ifs
 }
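The new ``elif`` branch above changes the default for admin-less runs; a hedged ``local.conf`` sketch of the combination it targets:

::

    [[local|localrc]]
    # No admin credentials available to tempest: tenant isolation now
    # defaults off instead of on, unless re-enabled explicitly.
    TEMPEST_HAS_ADMIN=False
    #TEMPEST_ALLOW_TENANT_ISOLATION=True
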
diff --git a/openrc b/openrc
index 71ba5a6..9bc0fd7 100644
--- a/openrc
+++ b/openrc
@@ -95,12 +95,6 @@
     fi
 fi
 
-# Currently novaclient needs you to specify the *compute api* version.  This
-# needs to match the config of your catalog returned by Keystone.
-export NOVA_VERSION=${NOVA_VERSION:-1.1}
-# In the future this will change names:
-export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION}
-
 # Currently cinderclient needs you to specify the *volume api* version. This
 # needs to match the config of your catalog returned by Keystone.
 export CINDER_VERSION=${CINDER_VERSION:-2}
diff --git a/stack.sh b/stack.sh
index 8024731..9b811b7 100755
--- a/stack.sh
+++ b/stack.sh
@@ -93,6 +93,20 @@
     exit 1
 fi
 
+# OpenStack is designed to run at a system level, with system-level
+# installation of python packages. It does not support running under a
+# virtual env, and will fail in really odd ways if you do this. Make
+# this explicit as it has come up on the mailing list.
+if [[ -n "$VIRTUAL_ENV" ]]; then
+    echo "You appear to be running under a python virtualenv."
+    echo "DevStack does not support this, as we may break the"
+    echo "virtualenv you are currently in by modifying "
+    echo "external system-level components the virtualenv relies on."
+    echo "We recommend you use a separate virtual-machine if "
+    echo "you are worried about DevStack taking over your system."
+    exit 1
+fi
+
 # Provide a safety switch for devstack. If you do a lot of devstack,
 # on a lot of different environments, you sometimes run it on the
 # wrong box. This makes there be a way to prevent that.
@@ -178,7 +192,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (precise|trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -321,6 +335,10 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
+# Ensure python is installed
+# --------------------------
+is_package_installed python || install_package python
+
 
 # Configure Logging
 # -----------------
@@ -539,6 +557,7 @@
 source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/dlm
 
 # Extras Source
 # --------------
@@ -716,12 +735,6 @@
 # Install required infra support libraries
 install_infra
 
-# Pre-build some problematic wheels
-if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then
-    source $TOP_DIR/tools/build_wheels.sh
-fi
-
-
 # Extras Pre-install
 # ------------------
 # Phase: pre-install
@@ -729,6 +742,10 @@
 
 install_rpc_backend
 
+# NOTE(sdague): dlm install is conditional on one being enabled by configuration
+install_dlm
+configure_dlm
+
 if is_service_enabled $DATABASE_BACKENDS; then
     install_database
 fi
diff --git a/stackrc b/stackrc
index 4026ff8..f400047 100644
--- a/stackrc
+++ b/stackrc
@@ -101,7 +101,17 @@
 # ctrl-c, up-arrow, enter to restart the service. Starting services
 # this way is slightly unreliable, and a bit slower, so this can
 # be disabled for automated testing by setting this value to False.
-USE_SCREEN=True
+USE_SCREEN=$(trueorfalse True USE_SCREEN)
+
+# When using screen, should we keep a log file on disk?  You might
+# want this False if you have a long-running setup where verbose logs
+# can fill up the host.
+# XXX: Ideally screen itself would be configured to log but just not
+# activate.  This isn't possible with the screenrc syntax.  Temporary
+# logging can still be used by a developer with:
+#    C-a : logfile foo
+#    C-a : log on
+SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING)
 
 # Passwords generated by interactive devstack runs
 if [[ -r $RC_DIR/.localrc.password ]]; then
@@ -143,11 +153,6 @@
 # requirmenets files here, in a comma-separated list
 ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""}
 
-# Configure wheel cache location
-export WHEELHOUSE=${WHEELHOUSE:-$DEST/.wheelhouse}
-export PIP_WHEEL_DIR=${PIP_WHEEL_DIR:-$WHEELHOUSE}
-export PIP_FIND_LINKS=${PIP_FIND_LINKS:-file://$WHEELHOUSE}
-
 # This can be used to turn database query logging on and off
 # (currently only implemented for MySQL backend)
 DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
@@ -371,6 +376,10 @@
 GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
 GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
 
+# oslo.privsep
+GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git}
+GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master}
+
 # oslo.reports
 GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
 GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
@@ -642,9 +651,6 @@
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"}
 
-# Compatibility until it's eradicated from CI
-USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
-
 # Set default screen name
 SCREEN_NAME=${SCREEN_NAME:-stack}
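``USE_SCREEN`` now goes through ``trueorfalse`` and the new ``SCREEN_IS_LOGGING`` knob separates "run under screen" from "write screen logs to disk"; a hedged ``local.conf`` sketch for a long-lived host:

::

    [[local|localrc]]
    USE_SCREEN=True
    # Keep the screen windows but stop writing per-service logfiles
    SCREEN_IS_LOGGING=False
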
 
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index f555de8..be8dc5e 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -245,4 +245,33 @@
     passed "OK"
 fi
 
+function test_export_proxy_variables {
+    echo "Testing export_proxy_variables()"
+
+    local expected results
+
+    http_proxy=http_proxy_test
+    https_proxy=https_proxy_test
+    no_proxy=no_proxy_test
+
+    export_proxy_variables
+    expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy")
+    results=$(env | egrep '(http(s)?|no)_proxy=')
+    if [[ $expected = $results ]]; then
+        passed "OK: Proxy variables are exported when proxy variables are set"
+    else
+        failed "Expected: $expected, Failed: $results"
+    fi
+
+    unset http_proxy https_proxy no_proxy
+    export_proxy_variables
+    results=$(env | egrep '(http(s)?|no)_proxy=')
+    if [[ "" = $results ]]; then
+        passed "OK: Proxy variables aren't exported when proxy variables aren't set"
+    else
+        failed "Expected: '', Failed: $results"
+    fi
+}
+test_export_proxy_variables
+
 report_results
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 8e8c022..f31560a 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -41,7 +41,7 @@
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports"
-ALL_LIBS+=" keystoneauth ironic-lib"
+ALL_LIBS+=" keystoneauth ironic-lib oslo.privsep"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh
index a04c081..f3e94af 100755
--- a/tests/test_meta_config.sh
+++ b/tests/test_meta_config.sh
@@ -23,6 +23,12 @@
     fi
 }
 
+# mock functions-common:die so that it does not
+# interrupt our test script
+function die {
+    exit -1
+}
+
 TEST_1C_ADD="[eee]
 type=new
 multi = foo2"
@@ -110,6 +116,15 @@
 [DEFAULT]
 servers=10.11.12.13:80
 
+[[test8|/permission-denied.conf]]
+foo=bar
+
+[[test9|\$UNDEF]]
+foo=bar
+
+[[test10|does-not-exist-dir/test.conf]]
+foo=bar
+
 [[test-multi-sections|test-multi-sections.conf]]
 [sec-1]
 cfg_item1 = abcd
@@ -340,6 +355,36 @@
 servers = 10.11.12.13:80"
 check_result "$VAL" "$EXPECT_VAL"
 
+echo "merge_config_file test8 non-touchable conf file: "
+set +e
+# the function is expected to fail and exit, so run it
+# in a subprocess to let this script proceed
+(merge_config_file test.conf test8 /permission-denied.conf)
+VAL=$?
+EXPECT_VAL=255
+check_result "$VAL" "$EXPECT_VAL"
+set -e
+
+echo -n "merge_config_group test9 undefined conf file: "
+set +e
+# the function is expected to fail and exit, so run it
+# in a subprocess to let this script proceed
+(merge_config_group test.conf test9)
+VAL=$?
+EXPECT_VAL=255
+check_result "$VAL" "$EXPECT_VAL"
+set -e
+
+echo -n "merge_config_group test10 not directory: "
+set +e
+# the function is expected to fail and exit, so run it
+# in a subprocess to let this script proceed
+(merge_config_group test.conf test10)
+VAL=$?
+EXPECT_VAL=255
+check_result "$VAL" "$EXPECT_VAL"
+set -e
+
 rm -f test.conf test1c.conf test2a.conf \
     test-space.conf test-equals.conf test-strip.conf \
     test-colon.conf test-env.conf test-multiline.conf \
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
deleted file mode 100755
index 14c2999..0000000
--- a/tools/build_wheels.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env bash
-#
-# **tools/build_wheels.sh** - Build a cache of Python wheels
-#
-# build_wheels.sh [package [...]]
-#
-# System package prerequisites listed in ``files/*/devlibs`` will be installed
-#
-# Builds wheels for all virtual env requirements listed in
-# ``venv-requirements.txt`` plus any supplied on the command line.
-#
-# Assumes:
-# - ``tools/install_pip.sh`` has been run and a suitable ``pip/setuptools`` is available.
-
-# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
-# or in a sub-shell
-if [[ -z "$TOP_DIR" ]]; then
-
-    set -o errexit
-    set -o nounset
-
-    # Keep track of the DevStack directory
-    TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-    FILES=$TOP_DIR/files
-
-    # Import common functions
-    source $TOP_DIR/functions
-
-    GetDistro
-
-    source $TOP_DIR/stackrc
-
-    trap err_trap ERR
-
-fi
-
-# Get additional packages to build
-MORE_PACKAGES="$@"
-
-# Exit on any errors so that errors don't compound
-function err_trap {
-    local r=$?
-    set +o xtrace
-
-    rm -rf $TMP_VENV_PATH
-
-    exit $r
-}
-
-# Get system prereqs
-install_package $(get_packages devlibs)
-
-# Get a modern ``virtualenv``
-pip_install virtualenv
-
-# Prepare the workspace
-TMP_VENV_PATH=$(mktemp -d tmp-venv-XXXX)
-virtualenv $TMP_VENV_PATH
-
-# Install modern pip and wheel
-PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
-
-# BUG: cffi has a lot of issues. It has no stable ABI, if installed
-# code is built with a different ABI than the one that's detected at
-# load time, it tries to compile on the fly for the new ABI in the
-# install location (which will probably be /usr and not
-# writable). Also cffi is often included via setup_requires by
-# packages, which have different install rules (allowing betas) than
-# pip has.
-#
-# Because of this we must pip install cffi into the venv to build
-# wheels.
-PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install_gr cffi
-
-# ``VENV_PACKAGES`` is a list of packages we want to pre-install
-VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
-if [[ -r $VENV_PACKAGE_FILE ]]; then
-    VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE)
-fi
-
-for pkg in ${VENV_PACKAGES,/ } ${MORE_PACKAGES}; do
-    $TMP_VENV_PATH/bin/pip wheel $pkg
-done
-
-# Clean up wheel workspace
-rm -rf $TMP_VENV_PATH
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 13c1786..ab5efb2 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -110,7 +110,11 @@
 # Do pip
 
 # Eradicate any and all system packages
-uninstall_package python-pip
+
+# python in f23 depends on the python-pip package
+if ! { is_fedora && [[ $DISTRO == "f23" ]]; }; then
+    uninstall_package python-pip
+fi
 
 install_get_pip
 
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index a07e58d..38452cd 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -61,7 +61,7 @@
 # ================
 
 # Install package requirements
-PACKAGES=$(get_packages general $ENABLED_SERVICES)
+PACKAGES=$(get_packages general,$ENABLED_SERVICES)
 PACKAGES="$PACKAGES $(get_plugin_packages)"
 
 if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 1b337a9..97e4d94 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -86,8 +86,10 @@
 
 
 def ebtables_dump():
+    tables = ['filter', 'nat', 'broute']
     _header("EB Tables Dump")
-    _dump_cmd("sudo ebtables -L")
+    for table in tables:
+        _dump_cmd("sudo ebtables -t %s -L" % table)
 
 
 def iptables_dump():
diff --git a/tox.ini b/tox.ini
index 0df9877..9279455 100644
--- a/tox.ini
+++ b/tox.ini
@@ -9,7 +9,7 @@
 
 [testenv:bashate]
 deps =
-   {env:BASHATE_INSTALL_PATH:bashate==0.3.1}
+   {env:BASHATE_INSTALL_PATH:bashate==0.3.2}
 whitelist_externals = bash
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .?\* -prune \)    \ # prune all 'dot' dirs
@@ -20,12 +20,13 @@
          -not -name \*.md                        \
          \(                                      \
           -name \*.sh -or                        \
-          -name \*rc -or                         \
+          -name \*.orig -or                      \
+          -name \*rc -or                         \ # openrc files, etc
           -name functions\* -or                  \
           -wholename \*/inc/\* -or               \ # /inc files and
           -wholename \*/lib/\*                   \ # /lib files are shell, but
          \)                                      \ #   have no extension
-         -print0 | xargs -0 bashate -v -iE006"
+         -print0 | xargs -0 bashate -v -iE006 -eE005,E042"
 
 [testenv:docs]
 deps =
diff --git a/unstack.sh b/unstack.sh
index 30447a7..8eded83 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -69,6 +69,7 @@
 source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/dlm
 
 # Extras Source
 # --------------
@@ -172,6 +173,10 @@
     stop_dstat
 fi
 
+if is_service_enabled zookeeper; then
+    stop_zookeeper
+fi
+
 # Clean up the remainder of the screen processes
 SCREEN=$(which screen)
 if [[ -n "$SCREEN" ]]; then