Merge "Enable some serivce when on boot"
diff --git a/.gitignore b/.gitignore
index 2778a65..8870bb3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@
 shocco
 src
 stack-screenrc
+userrc_early
diff --git a/HACKING.rst b/HACKING.rst
index 6bd24b0..d763c75 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -250,8 +250,7 @@
   database access from the exercise itself.
 
 * If specific configuration needs to be present for the exercise to complete,
-  it should be staged in ``stack.sh``, or called from ``stack.sh`` (see
-  ``files/keystone_data.sh`` for an example of this).
+  it should be staged in ``stack.sh``, or called from ``stack.sh``.
 
 * The ``OS_*`` environment variables should be the only ones used for all
   authentication to OpenStack clients as documented in the CLIAuth_ wiki page.
@@ -329,7 +328,7 @@
 your change
 
 * **Is it passing tests** -- your change will not be reviewed
-  throughly unless the official CI has run successfully against it.
+  thoroughly unless the official CI has run successfully against it.
 
 * **Does this belong in DevStack** -- DevStack reviewers have a
   default position of "no" but are ready to be convinced by your
diff --git a/Makefile b/Makefile
index 082aff2..a94d60a 100644
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,6 @@
 
 # Duplicated from stackrc for now
 DEST=/opt/stack
-WHEELHOUSE=$(DEST)/.wheelhouse
 
 all:
 	echo "This just saved you from a terrible mistake!"
@@ -25,9 +24,6 @@
 unstack:
 	./unstack.sh
 
-wheels:
-	WHEELHOUSE=$(WHEELHOUSE) tools/build-wheels.sh
-
 docs:
 	tox -edocs
 
@@ -57,7 +53,7 @@
 
 # Clean out the cache too
 realclean: clean
-	rm -rf files/cirros*.tar.gz files/Fedora*.qcow2 $(WHEELHOUSE)
+	rm -rf files/cirros*.tar.gz files/Fedora*.qcow2
 
 # Repo stuffs
 
diff --git a/clean.sh b/clean.sh
index b22a29c..fc6f80d 100755
--- a/clean.sh
+++ b/clean.sh
@@ -50,7 +50,6 @@
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron-legacy
-source $TOP_DIR/lib/ironic
 
 
 # Extras Source
@@ -134,7 +133,9 @@
 
 # Clean up files
 
-FILES_TO_CLEAN=".localrc.auto docs/files docs/html shocco/ stack-screenrc test*.conf* test.ini*"
+FILES_TO_CLEAN=".localrc.auto .localrc.password "
+FILES_TO_CLEAN+="docs/files docs/html shocco/ "
+FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* "
 FILES_TO_CLEAN+=".stackenv .prereqs"
 
 for file in $FILES_TO_CLEAN; do
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 7ca82c7..386fbbb 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -44,6 +44,7 @@
    before they are started
 -  **extra** - runs after services are started and before any files in
    ``extra.d`` are executed
+-  **post-extra** - runs after files in ``extra.d`` are executed
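+
+For example, a hypothetical Tempest option override applied during the
+**post-extra** phase (the section and option shown are illustrative
+only)::
+
+    [[post-extra|$TEMPEST_CONFIG]]
+    [compute]
+    build_timeout = 300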
 
 The file is processed strictly in sequence; meta-sections may be
 specified more than once but if any settings are duplicated the last to
@@ -202,8 +203,8 @@
 
         LOGDIR=$DEST/logs
 
-*Note the use of ``DEST`` to locate the main install directory; this
-is why we suggest setting it in ``local.conf``.*
+Note the use of ``DEST`` to locate the main install directory; this
+is why we suggest setting it in ``local.conf``.
 
 Enabling Syslog
 ~~~~~~~~~~~~~~~
@@ -211,7 +212,7 @@
 Logging all services to a single syslog can be convenient. Enable
 syslogging by setting ``SYSLOG`` to ``True``. If the destination log
 host is not localhost ``SYSLOG_HOST`` and ``SYSLOG_PORT`` can be used
-to direct the message stream to the log host.  |
+to direct the message stream to the log host.
 
     ::
 
@@ -239,15 +240,15 @@
 
 Multiple database backends are available. The available databases are defined
 in the lib/databases directory.
-`mysql` is the default database, choose a different one by putting the
-following in the `localrc` section:
+``mysql`` is the default database, choose a different one by putting the
+following in the ``localrc`` section:
 
    ::
 
       disable_service mysql
       enable_service postgresql
 
-`mysql` is the default database.
+``mysql`` is the default database.
 
 RPC Backend
 -----------
@@ -260,6 +261,7 @@
 Example disabling RabbitMQ in ``local.conf``:
 
 ::
+
     disable_service rabbit
 
 
@@ -297,6 +299,12 @@
 
     SWIFT_USE_MOD_WSGI="True"
 
+Example (Heat):
+
+::
+
+    HEAT_USE_MOD_WSGI="True"
+
 
 Example (Cinder):
 
@@ -387,7 +395,7 @@
         KEYSTONE_CATALOG_BACKEND=template
 
 DevStack's default configuration in ``sql`` mode is set in
-``files/keystone_data.sh``
+``lib/keystone``
 
 
 Guest Images
@@ -408,6 +416,28 @@
         IMAGE_URLS="http://foo.bar.com/image.qcow,"
         IMAGE_URLS+="http://foo.bar.com/image2.qcow"
 
+
+Instance Type
+-------------
+
+``DEFAULT_INSTANCE_TYPE`` can be used to configure the default instance
+type. When this parameter is not specified, DevStack creates additional
+micro and nano flavors for very small instances to run Tempest tests.
+
+For guests with larger memory requirements, ``DEFAULT_INSTANCE_TYPE``
+should be specified in the configuration file so Tempest selects the
+default flavors instead.
+
+KVM on Power with QEMU 2.4 requires 512 MB to load the firmware (see
+`QEMU 2.4 - PowerPC <http://wiki.qemu.org/ChangeLog/2.4>`__), so users
+running instances on ppc64/ppc64le can choose one of the default
+flavors as follows:
+
+    ::
+
+        DEFAULT_INSTANCE_TYPE=m1.tiny
+
+
 IP Version
 ----------
 
@@ -483,7 +513,7 @@
 object services will run directly in screen. The others services like
 replicator, updaters or auditor runs in background.
 
-If you would like to enable Swift you can add this to your `localrc`
+If you would like to enable Swift you can add this to your ``localrc``
 section:
 
 ::
@@ -491,7 +521,7 @@
     enable_service s-proxy s-object s-container s-account
 
 If you want a minimal Swift install with only Swift and Keystone you
-can have this instead in your `localrc` section:
+can have this instead in your ``localrc`` section:
 
 ::
 
@@ -500,24 +530,24 @@
 
 If you only want to do some testing of a real normal swift cluster
 with multiple replicas you can do so by customizing the variable
-`SWIFT_REPLICAS` in your `localrc` section (usually to 3).
+``SWIFT_REPLICAS`` in your ``localrc`` section (usually to 3).
 
 Swift S3
 ++++++++
 
-If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will
+If you are enabling ``swift3`` in ``ENABLED_SERVICES`` DevStack will
 install the swift3 middleware emulation. Swift will be configured to
 act as a S3 endpoint for Keystone so effectively replacing the
-`nova-objectstore`.
+``nova-objectstore``.
 
 Only Swift proxy server is launched in the screen session all other
-services are started in background and managed by `swift-init` tool.
+services are started in the background and managed by the ``swift-init`` tool.
 
 Heat
 ~~~~
 
-Heat is disabled by default (see `stackrc` file). To enable it
-explicitly you'll need the following settings in your `localrc`
+Heat is disabled by default (see the ``stackrc`` file). To enable it
+explicitly you'll need the following settings in your ``localrc``
 section
 
 ::
@@ -526,7 +556,7 @@
 
 Heat can also run in standalone mode, and be configured to orchestrate
 on an external OpenStack cloud. To launch only Heat in standalone mode
-you'll need the following settings in your `localrc` section
+you'll need the following settings in your ``localrc`` section
 
 ::
 
@@ -562,14 +592,14 @@
 ~~~~~~~~~
 
 If you would like to use Xenserver as the hypervisor, please refer to
-the instructions in `./tools/xen/README.md`.
+the instructions in ``./tools/xen/README.md``.
 
 Cells
 ~~~~~
 
 `Cells <http://wiki.openstack.org/blueprint-nova-compute-cells>`__ is
 an alternative scaling option.  To setup a cells environment add the
-following to your `localrc` section:
+following to your ``localrc`` section:
 
 ::
 
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index 0db8932..7aca8d0 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -54,7 +54,7 @@
 releases other than those documented in ``README.md`` on a best-effort
 basis.
 
-Are there any differences between Ubuntu and Centos/Fedora support?
+Are there any differences between Ubuntu and CentOS/Fedora support?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Both should work well and are tested by DevStack CI.
@@ -124,24 +124,30 @@
 
         enable_service q-svc
 
-How do I run a specific OpenStack milestone?
+How do I run a specific OpenStack release?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-OpenStack milestones have tags set in the git repo. Set the
-appropriate tag in the ``*_BRANCH`` variables in ``local.conf``.
-Swift is on its own release schedule so pick a tag in the Swift repo
-that is just before the milestone release. For example:
+DevStack master tracks the upstream master of all the projects. If you
+would like to run a stable branch of OpenStack, you should use the
+corresponding stable branch of DevStack as well. For instance, the
+``stable/kilo`` version of DevStack will already default to all the
+projects running at ``stable/kilo`` levels.
 
-    ::
+Note: it's also possible to manually adjust the ``*_BRANCH`` variables
+further if you would like to test specific milestones, or even custom
+out-of-tree branches. This is done with entries like the following in
+your ``local.conf``:
+
+::
 
         [[local|localrc]]
-        GLANCE_BRANCH=stable/kilo
-        HORIZON_BRANCH=stable/kilo
-        KEYSTONE_BRANCH=stable/kilo
-        NOVA_BRANCH=stable/kilo
-        GLANCE_BRANCH=stable/kilo
-        NEUTRON_BRANCH=stable/kilo
-        SWIFT_BRANCH=2.3.0
+        GLANCE_BRANCH=11.0.0.0rc1
+        NOVA_BRANCH=12.0.0.0rc1
+
+
+Upstream DevStack is only tested with master and stable
+branches. Setting custom BRANCH definitions is not guaranteed to
+produce working results.
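+
+For example, a sketch of checking out the matching stable branch of
+DevStack itself (the repository URL is assumed to be the standard
+upstream location)::
+
+    git clone git://git.openstack.org/openstack-dev/devstack -b stable/kilo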
 
 What can I do about RabbitMQ not wanting to start on my fresh new VM?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index f679783..f3bd2fe 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -1,13 +1,17 @@
-Configure Load-Balancer in Kilo
+Configure Load-Balancer Version 2
 =================================
 
-The Kilo release of OpenStack will support Version 2 of the neutron load balancer. Until now, using OpenStack `LBaaS V2 <http://docs.openstack.org/api/openstack-network/2.0/content/lbaas_ext.html>`_ has required a good understanding of neutron and LBaaS architecture and several manual steps.
+Starting in the OpenStack Liberty release, the
+`neutron LBaaS v2 API <http://developer.openstack.org/api-ref-networking-v2-ext.html>`_
+is now stable while the LBaaS v1 API has been deprecated.  The LBaaS v2 reference
+driver is based on Octavia.
 
 
 Phase 1: Create DevStack + 2 nova instances
 --------------------------------------------
 
-First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, make sure it is updated. Install git and any other developer tools you find useful.
+First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space,
+and make sure it is updated. Install git and any other developer tools you find useful.
 
 Install devstack
 
@@ -17,13 +21,14 @@
     cd devstack
 
 
-Edit your `local.conf` to look like
+Edit your ``local.conf`` to look like
 
   ::
 
     [[local|localrc]]
     # Load the external LBaaS plugin.
     enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
+    enable_plugin octavia https://git.openstack.org/openstack/octavia
 
     # ===== BEGIN localrc =====
     DATABASE_PASSWORD=password
@@ -42,13 +47,13 @@
     ENABLED_SERVICES+=,horizon
     # Nova
     ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch
-    IMAGE_URLS+=",https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img"
     # Glance
     ENABLED_SERVICES+=,g-api,g-reg
     # Neutron
     ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta
-    # Enable LBaaS V2
+    # Enable LBaaS v2
     ENABLED_SERVICES+=,q-lbaasv2
+    ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
     # Cinder
     ENABLED_SERVICES+=,c-api,c-vol,c-sch
     # Tempest
@@ -69,11 +74,11 @@
   ::
 
     #create nova instances on private network
-    nova boot --image $(nova image-list | awk '/ cirros-0.3.0-x86_64-disk / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1
-    nova boot --image $(nova image-list | awk '/ cirros-0.3.0-x86_64-disk / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2
     nova list # should show the nova instances just created
 
-    #add secgroup rule to allow ssh etc..
+    #add secgroup rules to allow ssh etc.
     neutron security-group-rule-create default --protocol icmp
     neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22
     neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80
@@ -91,9 +96,16 @@
  ::
 
     neutron lbaas-loadbalancer-create --name lb1 private-subnet
+    neutron lbaas-loadbalancer-show lb1  # Wait for the provisioning_status to be ACTIVE.
     neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1
+    sleep 10  # Sleep since LBaaS actions can take a few seconds depending on the environment.
     neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+    sleep 10
     neutron lbaas-member-create  --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1
+    sleep 10
     neutron lbaas-member-create  --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1
 
-Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes (in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is "curl that-lb-ip", which should alternate between showing the IPs of the two nodes.
+Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands
+are the IPs of the nodes (in my test run-thru, they were actually 10.2 and
+10.4). The address of the created LB is reported as "vip_address" by
+lbaas-loadbalancer-create; a quick test of the LB is "curl that-lb-ip",
+which should alternate between showing the IPs of the two nodes.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index c652bac..85a5656 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -50,7 +50,7 @@
     parm:           nested:bool
 
 Start your VM, now it should have KVM capabilities -- you can verify
-that by ensuring `/dev/kvm` character device is present.
+that by ensuring the ``/dev/kvm`` character device is present.
 
 
 Configure Nested KVM for AMD-based Machines
@@ -97,7 +97,7 @@
 Expose Virtualization Extensions to DevStack VM
 -----------------------------------------------
 
-Edit the VM's libvirt XML configuration via `virsh` utility:
+Edit the VM's libvirt XML configuration via the ``virsh`` utility:
 
 ::
 
@@ -115,10 +115,10 @@
 -------------------------------
 
 Before invoking ``stack.sh`` in the VM, ensure that KVM is enabled. This
-can be verified by checking for the presence of the file `/dev/kvm` in
+can be verified by checking for the presence of the file ``/dev/kvm`` in
 your VM. If it is present, DevStack will default to using the config
-attribute `virt_type = kvm` in `/etc/nova.conf`; otherwise, it'll fall
-back to `virt_type=qemu`, i.e. plain QEMU emulation.
+attribute ``virt_type = kvm`` in ``/etc/nova/nova.conf``; otherwise, it'll fall
+back to ``virt_type=qemu``, i.e. plain QEMU emulation.
 
 Optionally, to explicitly set the type of virtualization, to KVM, by the
 libvirt driver in nova, the below config attribute can be used in
@@ -131,7 +131,7 @@
 
 Once DevStack is configured successfully, verify if the Nova instances
 are using KVM by noticing the QEMU CLI invoked by Nova is using the
-parameter `accel=kvm`, e.g.:
+parameter ``accel=kvm``, e.g.:
 
 ::
 
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 1530a84..5660bc5 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -128,7 +128,7 @@
     MULTI_HOST=1
     LOGFILE=/opt/stack/logs/stack.sh.log
     ADMIN_PASSWORD=labstack
-    MYSQL_PASSWORD=supersecret
+    DATABASE_PASSWORD=supersecret
     RABBIT_PASSWORD=supersecrete
     SERVICE_PASSWORD=supersecrete
     SERVICE_TOKEN=xyzpdqlazydog
@@ -169,7 +169,7 @@
     MULTI_HOST=1
     LOGFILE=/opt/stack/logs/stack.sh.log
     ADMIN_PASSWORD=labstack
-    MYSQL_PASSWORD=supersecret
+    DATABASE_PASSWORD=supersecret
     RABBIT_PASSWORD=supersecrete
     SERVICE_PASSWORD=supersecrete
     SERVICE_TOKEN=xyzpdqlazydog
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 2973eb6..a72b6f9 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -35,7 +35,7 @@
                 network hardware_network {
                         address = "172.18.161.0/24"
                         router [ address = "172.18.161.1" ];
-                        devstack_laptop [ address = "172.18.161.6" ];
+                        devstack-1 [ address = "172.18.161.6" ];
                 }
         }
 
@@ -43,20 +43,30 @@
 DevStack Configuration
 ----------------------
 
+The following is a complete ``local.conf`` for the host named
+``devstack-1``. It will run all of the API and management services,
+as well as serve as a hypervisor for guest instances.
 
 ::
 
+        [[local|localrc]]
         HOST_IP=172.18.161.6
         SERVICE_HOST=172.18.161.6
         MYSQL_HOST=172.18.161.6
         RABBIT_HOST=172.18.161.6
         GLANCE_HOSTPORT=172.18.161.6:9292
         ADMIN_PASSWORD=secrete
-        MYSQL_PASSWORD=secrete
+        DATABASE_PASSWORD=secrete
         RABBIT_PASSWORD=secrete
         SERVICE_PASSWORD=secrete
         SERVICE_TOKEN=secrete
 
+        # Do not use Nova-Network
+        disable_service n-net
+        # Enable Neutron
+        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
+
+
         ## Neutron options
         Q_USE_SECGROUP=True
         FLOATING_RANGE="172.18.161.0/24"
@@ -71,21 +81,218 @@
         OVS_BRIDGE_MAPPINGS=public:br-ex
 
 
+Adding Additional Compute Nodes
+-------------------------------
+
+Let's suppose that after installing DevStack on the first host, you
+also want to do multinode testing and networking.
+
+Physical Network Setup
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. nwdiag::
+
+        nwdiag {
+                inet [ shape = cloud ];
+                router;
+                inet -- router;
+
+                network hardware_network {
+                        address = "172.18.161.0/24"
+                        router [ address = "172.18.161.1" ];
+                        devstack-1 [ address = "172.18.161.6" ];
+                        devstack-2 [ address = "172.18.161.7" ];
+                }
+        }
+
+
+After DevStack installs and configures Neutron, traffic from guest VMs
+flows out of `devstack-2` (the compute node) and is encapsulated in a
+VXLAN tunnel back to `devstack-1` (the control node) where the L3
+agent is running.
+
+::
+
+    stack@devstack-2:~/devstack$ sudo ovs-vsctl show
+    8992d965-0ba0-42fd-90e9-20ecc528bc29
+        Bridge br-int
+            fail_mode: secure
+            Port br-int
+                Interface br-int
+                    type: internal
+            Port patch-tun
+                Interface patch-tun
+                    type: patch
+                    options: {peer=patch-int}
+        Bridge br-tun
+            fail_mode: secure
+            Port "vxlan-c0a801f6"
+                Interface "vxlan-c0a801f6"
+                    type: vxlan
+                    options: {df_default="true", in_key=flow, local_ip="172.18.161.7", out_key=flow, remote_ip="172.18.161.6"}
+            Port patch-int
+                Interface patch-int
+                    type: patch
+                    options: {peer=patch-tun}
+            Port br-tun
+                Interface br-tun
+                    type: internal
+        ovs_version: "2.0.2"
+
+Open vSwitch on the control node, where the L3 agent runs, is
+configured to de-encapsulate traffic from compute nodes, then forward
+it over the ``br-ex`` bridge, where ``eth0`` is attached.
+
+::
+
+    stack@devstack-1:~/devstack$ sudo ovs-vsctl show
+    422adeea-48d1-4a1f-98b1-8e7239077964
+        Bridge br-tun
+            fail_mode: secure
+            Port br-tun
+                Interface br-tun
+                    type: internal
+            Port patch-int
+                Interface patch-int
+                    type: patch
+                    options: {peer=patch-tun}
+            Port "vxlan-c0a801d8"
+                Interface "vxlan-c0a801d8"
+                    type: vxlan
+                    options: {df_default="true", in_key=flow, local_ip="172.18.161.6", out_key=flow, remote_ip="172.18.161.7"}
+        Bridge br-ex
+            Port phy-br-ex
+                Interface phy-br-ex
+                    type: patch
+                    options: {peer=int-br-ex}
+            Port "eth0"
+                Interface "eth0"
+            Port br-ex
+                Interface br-ex
+                    type: internal
+        Bridge br-int
+            fail_mode: secure
+            Port "tapce66332d-ea"
+                tag: 1
+                Interface "tapce66332d-ea"
+                    type: internal
+            Port "qg-65e5a4b9-15"
+                tag: 2
+                Interface "qg-65e5a4b9-15"
+                    type: internal
+            Port "qr-33e5e471-88"
+                tag: 1
+                Interface "qr-33e5e471-88"
+                    type: internal
+            Port "qr-acbe9951-70"
+                tag: 1
+                Interface "qr-acbe9951-70"
+                    type: internal
+            Port br-int
+                Interface br-int
+                    type: internal
+            Port patch-tun
+                Interface patch-tun
+                    type: patch
+                    options: {peer=patch-int}
+            Port int-br-ex
+                Interface int-br-ex
+                    type: patch
+                    options: {peer=phy-br-ex}
+        ovs_version: "2.0.2"
+
+``br-int`` is a bridge that the Open vSwitch mechanism driver creates,
+which is used as the "integration bridge" where ports are created and
+plugged into the virtual switching fabric. ``br-ex`` is an OVS bridge
+that is used to connect physical ports (like ``eth0``), so that floating
+IP traffic for tenants can be received from the physical network
+infrastructure (and the internet) and routed to tenant network ports.
+``br-tun`` is a tunnel bridge that is used to connect OpenStack nodes
+(like ``devstack-2``) together. This bridge is used so that tenant
+network traffic, using the VXLAN tunneling protocol, flows between
+the compute nodes where tenant instances run.
 
 
 
-Using Neutron with Multiple Interfaces
-======================================
+DevStack Compute Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The first interface, eth0 is used for the OpenStack management (API,
-message bus, etc) as well as for ssh for an administrator to access
-the machine.
+The host ``devstack-2`` has a very minimal ``local.conf``.
+
+::
+
+    [[local|localrc]]
+    HOST_IP=172.18.161.7
+    SERVICE_HOST=172.18.161.6
+    MYSQL_HOST=172.18.161.6
+    RABBIT_HOST=172.18.161.6
+    GLANCE_HOSTPORT=172.18.161.6:9292
+    ADMIN_PASSWORD=secrete
+    MYSQL_PASSWORD=secrete
+    RABBIT_PASSWORD=secrete
+    SERVICE_PASSWORD=secrete
+    SERVICE_TOKEN=secrete
+
+    ## Neutron options
+    PUBLIC_INTERFACE=eth0
+    ENABLED_SERVICES=n-cpu,rabbit,q-agt
+
+Network traffic from ``eth0`` on the compute nodes is then NAT'd by the
+controller node that runs Neutron's ``neutron-l3-agent`` and provides L3
+connectivity.
+
+
+Neutron Networking with Open vSwitch and Provider Networks
+==========================================================
+
+In some instances, it is desirable to use neutron's provider
+networking extension, so that networks that are configured on an
+external router can be utilized by neutron, and instances created via
+Nova can attach to the network managed by the external router.
+
+For example, in some lab environments, a hardware router has been
+pre-configured by another party, and an OpenStack developer has been
+given a VLAN tag and IP address range, so that instances created via
+DevStack will use the external router for L3 connectivity, as opposed
+to the neutron L3 service.
+
+Physical Network Setup
+----------------------
+
+.. nwdiag::
+
+        nwdiag {
+                inet [ shape = cloud ];
+                router;
+                inet -- router;
+
+                network provider_net {
+                        address = "203.0.113.0/24"
+                        router [ address = "203.0.113.1" ];
+                        controller;
+                        compute1;
+                        compute2;
+                }
+
+                network control_plane {
+                        router [ address = "10.0.0.1" ]
+                        address = "10.0.0.0/24"
+                        controller [ address = "10.0.0.2" ]
+                        compute1 [ address = "10.0.0.3" ]
+                        compute2 [ address = "10.0.0.4" ]
+                }
+        }
+
+
+On a compute node, the first interface, eth0 is used for the OpenStack
+management (API, message bus, etc) as well as for ssh for an
+administrator to access the machine.
 
 ::
 
         stack@compute:~$ ifconfig eth0
         eth0      Link encap:Ethernet  HWaddr bc:16:65:20:af:fc
-                  inet addr:192.168.1.18
+                  inet addr:10.0.0.3
 
 eth1 is manually configured at boot to not have an IP address.
 Consult your operating system documentation for the appropriate
@@ -101,9 +308,6 @@
 
 The second physical interface, eth1 is added to a bridge (in this case
 named br-ex), which is used to forward network traffic from guest VMs.
-Network traffic from eth1 on the compute nodes is then NAT'd by the
-controller node that runs Neutron's `neutron-l3-agent` and provides L3
-connectivity.
 
 ::
 
@@ -123,10 +327,120 @@
                     Interface "eth1"
 
 
+Service Configuration
+---------------------
+
+**Control Node**
+
+In this example, the control node will run the majority of the
+OpenStack API and management services (keystone, glance,
+nova, neutron)
+
+
+**Compute Nodes**
+
+In this example, the nodes that will host guest instances will run
+the ``neutron-openvswitch-agent`` for network connectivity, as well as
+the compute service ``nova-compute``.
+
+DevStack Configuration
+----------------------
+
+The following is a snippet of the DevStack configuration on the
+controller node.
+
+::
+
+        HOST_IP=10.0.0.2
+        SERVICE_HOST=10.0.0.2
+        MYSQL_HOST=10.0.0.2
+        RABBIT_HOST=10.0.0.2
+        GLANCE_HOSTPORT=10.0.0.2:9292
+        PUBLIC_INTERFACE=eth1
+
+        ADMIN_PASSWORD=secrete
+        MYSQL_PASSWORD=secrete
+        RABBIT_PASSWORD=secrete
+        SERVICE_PASSWORD=secrete
+        SERVICE_TOKEN=secrete
+
+        ## Neutron options
+        Q_USE_SECGROUP=True
+        ENABLE_TENANT_VLANS=True
+        TENANT_VLAN_RANGE=3001:4000
+        PHYSICAL_NETWORK=default
+        OVS_PHYSICAL_BRIDGE=br-ex
+
+        Q_USE_PROVIDER_NETWORKING=True
+        Q_L3_ENABLED=False
+
+        # Do not use Nova-Network
+        disable_service n-net
+
+        # Neutron
+        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt
+
+        ## Neutron Networking options used to create Neutron Subnets
+
+        FIXED_RANGE="203.0.113.0/24"
+        NETWORK_GATEWAY=203.0.113.1
+        PROVIDER_SUBNET_NAME="provider_net"
+        PROVIDER_NETWORK_TYPE="vlan"
+        SEGMENTATION_ID=2010
+
+In this configuration we are defining FIXED_RANGE to be a
+publicly routed IPv4 subnet. In this specific instance we are using
+the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
+which is used for documentation.  In your DevStack setup, FIXED_RANGE
+would be a public IP address range that you or your organization has
+allocated to you, so that you could access your instances from the
+public internet.
+
+The following is the DevStack configuration on
+compute node 1.
+
+::
+
+        HOST_IP=10.0.0.3
+        SERVICE_HOST=10.0.0.2
+        MYSQL_HOST=10.0.0.2
+        RABBIT_HOST=10.0.0.2
+        GLANCE_HOSTPORT=10.0.0.2:9292
+        ADMIN_PASSWORD=secrete
+        MYSQL_PASSWORD=secrete
+        RABBIT_PASSWORD=secrete
+        SERVICE_PASSWORD=secrete
+        SERVICE_TOKEN=secrete
+
+        # Services that a compute node runs
+        ENABLED_SERVICES=n-cpu,rabbit,q-agt
+
+        ## Neutron options
+        PHYSICAL_NETWORK=default
+        OVS_PHYSICAL_BRIDGE=br-ex
+        PUBLIC_INTERFACE=eth1
+        Q_USE_PROVIDER_NETWORKING=True
+        Q_L3_ENABLED=False
+
+Compute node 2's configuration will be exactly the same, except
+``HOST_IP`` will be ``10.0.0.4``.
+
+When DevStack is configured to use provider networking (that is,
+``Q_USE_PROVIDER_NETWORKING`` is True and ``Q_L3_ENABLED`` is False),
+DevStack will automatically add the network interface defined in
+``PUBLIC_INTERFACE`` to the ``OVS_PHYSICAL_BRIDGE``.
+
+For example, with the above configuration, a bridge named ``br-ex``
+is created and managed by Open vSwitch, and the second interface on
+the compute node, ``eth1``, is attached to the bridge to forward
+traffic sent by guest VMs.
+
+Miscellaneous Tips
+==================
 
 
 Disabling Next Generation Firewall Tools
-========================================
+----------------------------------------
 
 DevStack does not properly operate with modern firewall tools.  Specifically
 it will appear as if the guest VM can access the external network via ICMP,
@@ -156,148 +470,11 @@
         sudo service iptables save
         sudo ufw disable
 
+Configuring Extension Drivers for the ML2 Plugin
+------------------------------------------------
 
+Extension drivers for the ML2 plugin are set with the variable
+``Q_ML2_PLUGIN_EXT_DRIVERS``, which includes the ``port_security``
+extension by default. If you want to remove all the extension drivers
+(even ``port_security``), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank.
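+
+For example, to remove every extension driver (including
+``port_security``), a ``localrc`` entry such as the following should
+work::
+
+    Q_ML2_PLUGIN_EXT_DRIVERS=""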
 
-
-Neutron Networking with Open vSwitch
-====================================
-
-Configuring neutron, OpenStack Networking in DevStack is very similar to
-configuring `nova-network` - many of the same configuration variables
-(like `FIXED_RANGE` and `FLOATING_RANGE`) used by `nova-network` are
-used by neutron, which is intentional.
-
-The only difference is the disabling of `nova-network` in your
-local.conf, and the enabling of the neutron components.
-
-
-Configuration
--------------
-
-::
-
-        FIXED_RANGE=10.0.0.0/24
-        FLOATING_RANGE=192.168.27.0/24
-        PUBLIC_NETWORK_GATEWAY=192.168.27.2
-
-        disable_service n-net
-        enable_service q-svc
-        enable_service q-agt
-        enable_service q-dhcp
-        enable_service q-meta
-        enable_service q-l3
-
-        Q_USE_SECGROUP=True
-        ENABLE_TENANT_VLANS=True
-        TENANT_VLAN_RANGE=1000:1999
-        PHYSICAL_NETWORK=default
-        OVS_PHYSICAL_BRIDGE=br-ex
-
-In this configuration we are defining FLOATING_RANGE to be a
-subnet that exists in the private RFC1918 address space - however in
-in a real setup FLOATING_RANGE would be a public IP address range.
-
-Note that extension drivers for the ML2 plugin is set by
-`Q_ML2_PLUGIN_EXT_DRIVERS`, and it includes 'port_security' by default. If you
-want to remove all the extension drivers (even 'port_security'), set
-`Q_ML2_PLUGIN_EXT_DRIVERS` to blank.
-
-Neutron Networking with Open vSwitch and Provider Networks
-==========================================================
-
-In some instances, it is desirable to use neutron's provider
-networking extension, so that networks that are configured on an
-external router can be utilized by neutron, and instances created via
-Nova can attach to the network managed by the external router.
-
-For example, in some lab environments, a hardware router has been
-pre-configured by another party, and an OpenStack developer has been
-given a VLAN tag and IP address range, so that instances created via
-DevStack will use the external router for L3 connectivity, as opposed
-to the neutron L3 service.
-
-
-Service Configuration
----------------------
-
-**Control Node**
-
-In this example, the control node will run the majority of the
-OpenStack API and management services (keystone, glance,
-nova, neutron)
-
-
-**Compute Nodes**
-
-In this example, the nodes that will host guest instances will run
-the `neutron-openvswitch-agent` for network connectivity, as well as
-the compute service `nova-compute`.
-
-DevStack Configuration
-----------------------
-
-The following is a snippet of the DevStack configuration on the
-controller node.
-
-::
-
-        PUBLIC_INTERFACE=eth1
-
-        ## Neutron options
-        Q_USE_SECGROUP=True
-        ENABLE_TENANT_VLANS=True
-        TENANT_VLAN_RANGE=3001:4000
-        PHYSICAL_NETWORK=default
-        OVS_PHYSICAL_BRIDGE=br-ex
-
-        Q_USE_PROVIDER_NETWORKING=True
-        Q_L3_ENABLED=False
-
-        # Do not use Nova-Network
-        disable_service n-net
-
-        # Neutron
-        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt
-
-        ## Neutron Networking options used to create Neutron Subnets
-
-        FIXED_RANGE="203.0.113.0/24"
-        PROVIDER_SUBNET_NAME="provider_net"
-        PROVIDER_NETWORK_TYPE="vlan"
-        SEGMENTATION_ID=2010
-
-In this configuration we are defining FIXED_RANGE to be a
-publicly routed IPv4 subnet. In this specific instance we are using
-the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
-which is used for documentation.  In your DevStack setup, FIXED_RANGE
-would be a public IP address range that you or your organization has
-allocated to you, so that you could access your instances from the
-public internet.
-
-The following is a snippet of the DevStack configuration on the
-compute node.
-
-::
-
-        # Services that a compute node runs
-        ENABLED_SERVICES=n-cpu,rabbit,q-agt
-
-        ## Neutron options
-        Q_USE_SECGROUP=True
-        ENABLE_TENANT_VLANS=True
-        TENANT_VLAN_RANGE=3001:4000
-        PHYSICAL_NETWORK=default
-        OVS_PHYSICAL_BRIDGE=br-ex
-        PUBLIC_INTERFACE=eth1
-        Q_USE_PROVIDER_NETWORKING=True
-        Q_L3_ENABLED=False
-
-When DevStack is configured to use provider networking (via
-`Q_USE_PROVIDER_NETWORKING` is True and `Q_L3_ENABLED` is False) -
-DevStack will automatically add the network interface defined in
-`PUBLIC_INTERFACE` to the `OVS_PHYSICAL_BRIDGE`
-
-For example, with the above  configuration, a bridge is
-created, named `br-ex` which is managed by Open vSwitch, and the
-second interface on the compute node, `eth1` is attached to the
-bridge, to forward traffic sent by guest VMs.
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 236ece9..a01c368 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -105,7 +105,7 @@
     FIXED_NETWORK_SIZE=256
     FLAT_INTERFACE=eth0
     ADMIN_PASSWORD=supersecret
-    MYSQL_PASSWORD=iheartdatabases
+    DATABASE_PASSWORD=iheartdatabases
     RABBIT_PASSWORD=flopsymopsy
     SERVICE_PASSWORD=iheartksl
     SERVICE_TOKEN=xyzpdqlazydog
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index 515cd50..53c3fa9 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -64,7 +64,7 @@
             cd devstack
             echo '[[local|localrc]]' > local.conf
             echo ADMIN_PASSWORD=password >> local.conf
-            echo MYSQL_PASSWORD=password >> local.conf
+            echo DATABASE_PASSWORD=password >> local.conf
             echo RABBIT_PASSWORD=password >> local.conf
             echo SERVICE_PASSWORD=password >> local.conf
             echo SERVICE_TOKEN=tokentoken >> local.conf
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 99e96b1..4a1d93d 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -20,7 +20,7 @@
 
 #. Select a Linux Distribution
 
-   Only Ubuntu 14.04 (Trusty), Fedora 21 (or Fedora 22) and CentOS/RHEL
+   Only Ubuntu 14.04 (Trusty), Fedora 22 (or Fedora 23) and CentOS/RHEL
    7 are documented here. OpenStack also runs and is packaged on other
    flavors of Linux such as OpenSUSE and Debian.
 
@@ -44,6 +44,18 @@
 
    We recommend at least a :ref:`minimal-configuration` be set up.
 
+#. Add Stack User
+
+   DevStack should be run as a non-root user with sudo enabled
+   (standard logins to cloud images such as "ubuntu" or "cloud-user"
+   are usually fine).
+
+   You can quickly create a separate ``stack`` user to run DevStack with
+
+   ::
+
+       devstack/tools/create-stack-user.sh; su stack
+
 #. Start the install
 
    ::
@@ -162,7 +174,6 @@
 * `lib/heat <lib/heat.html>`__
 * `lib/horizon <lib/horizon.html>`__
 * `lib/infra <lib/infra.html>`__
-* `lib/ironic <lib/ironic.html>`__
 * `lib/keystone <lib/keystone.html>`__
 * `lib/ldap <lib/ldap.html>`__
 * `lib/neutron-legacy <lib/neutron-legacy.html>`__
@@ -177,7 +188,6 @@
 * `clean.sh <clean.sh.html>`__
 * `run\_tests.sh <run_tests.sh.html>`__
 
-* `extras.d/50-ironic.sh <extras.d/50-ironic.sh.html>`__
 * `extras.d/60-ceph.sh <extras.d/60-ceph.sh.html>`__
 * `extras.d/70-tuskar.sh <extras.d/70-tuskar.sh.html>`__
 * `extras.d/80-tempest.sh <extras.d/80-tempest.sh.html>`__
@@ -206,7 +216,6 @@
 
 * `tools/build\_docs.sh <tools/build_docs.sh.html>`__
 * `tools/build\_venv.sh <tools/build_venv.sh.html>`__
-* `tools/build\_wheels.sh <tools/build_wheels.sh.html>`__
 * `tools/create-stack-user.sh <tools/create-stack-user.sh.html>`__
 * `tools/create\_userrc.sh <tools/create_userrc.sh.html>`__
 * `tools/fixup\_stuff.sh <tools/fixup_stuff.sh.html>`__
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 85fd7cc..b96883a 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -22,10 +22,18 @@
 +------------------+---------------------------------------------+--------------------+
 |aodh              |git://git.openstack.org/openstack/aodh       | alarming           |
 +------------------+---------------------------------------------+--------------------+
+|barbican          |git://git.openstack.org/openstack/barbican   | key management     |
++------------------+---------------------------------------------+--------------------+
 |ceilometer        |git://git.openstack.org/openstack/ceilometer | metering           |
 +------------------+---------------------------------------------+--------------------+
+|congress          |git://git.openstack.org/openstack/congress   | governance         |
++------------------+---------------------------------------------+--------------------+
+|cue               |git://git.openstack.org/openstack/cue        | message-broker     |
++------------------+---------------------------------------------+--------------------+
 |gnocchi           |git://git.openstack.org/openstack/gnocchi    | metric             |
 +------------------+---------------------------------------------+--------------------+
+|ironic            |git://git.openstack.org/openstack/ironic     | baremetal          |
++------------------+---------------------------------------------+--------------------+
 |magnum            |git://git.openstack.org/openstack/magnum     |                    |
 +------------------+---------------------------------------------+--------------------+
 |manila            |git://git.openstack.org/openstack/manila     | file shares        |
@@ -66,7 +74,7 @@
 | Plugin Name | URL                                                        | Comments   |
 |             |                                                            |            |
 +-------------+------------------------------------------------------------+------------+
-|glusterfs    |git://git.openstack.org/stackforge/devstack-plugin-glusterfs|            |
+|glusterfs    |git://git.openstack.org/openstack/devstack-plugin-glusterfs |            |
 +-------------+------------------------------------------------------------+------------+
 |             |                                                            |            |
 +-------------+------------------------------------------------------------+------------+
@@ -74,16 +82,30 @@
 Additional Services
 ===================
 
-+----------------+--------------------------------------------------+------------+
-| Plugin Name    | URL                                              | Comments   |
-|                |                                                  |            |
-+----------------+--------------------------------------------------+------------+
-|ec2-api         |git://git.openstack.org/stackforge/ec2api         |[as1]_      |
-+----------------+--------------------------------------------------+------------+
-|ironic-inspector|git://git.openstack.org/openstack/ironic-inspector|            |
-+----------------+--------------------------------------------------+------------+
-|                |                                                  |            |
-+----------------+--------------------------------------------------+------------+
++-----------------+------------------------------------------------------------+------------+
+| Plugin Name     | URL                                                        | Comments   |
+|                 |                                                            |            |
++-----------------+------------------------------------------------------------+------------+
+|amqp1            |git://git.openstack.org/openstack/devstack-plugin-amqp1     |            |
++-----------------+------------------------------------------------------------+------------+
+|bdd              |git://git.openstack.org/openstack/devstack-plugin-bdd       |            |
++-----------------+------------------------------------------------------------+------------+
+|ec2-api          |git://git.openstack.org/openstack/ec2-api                   |[as1]_      |
++-----------------+------------------------------------------------------------+------------+
+|glusterfs        |git://git.openstack.org/openstack/devstack-plugin-glusterfs |            |
++-----------------+------------------------------------------------------------+------------+
+|hdfs             |git://git.openstack.org/openstack/devstack-plugin-hdfs      |            |
++-----------------+------------------------------------------------------------+------------+
+|ironic-inspector |git://git.openstack.org/openstack/ironic-inspector          |            |
++-----------------+------------------------------------------------------------+------------+
+|pika             |git://git.openstack.org/openstack/devstack-plugin-pika      |            |
++-----------------+------------------------------------------------------------+------------+
+|sheepdog         |git://git.openstack.org/openstack/devstack-plugin-sheepdog  |            |
++-----------------+------------------------------------------------------------+------------+
+|zmq              |git://git.openstack.org/openstack/devstack-plugin-zmq       |            |
++-----------------+------------------------------------------------------------+------------+
+|                 |                                                            |            |
++-----------------+------------------------------------------------------------+------------+
 
 .. [as1] first functional devstack plugin, hence why used in most of
          the examples.
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index fda601b..83e5609 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -19,7 +19,16 @@
 external repositories. The plugin interface assumes the following:
 
 An external git repository that includes a ``devstack/`` top level
-directory. Inside this directory there can be 2 files.
+directory. Inside this directory there can be 3 files.
+
+- ``override-defaults`` - a file containing global variables that
+  will be sourced before the lib/* files. This allows the plugin
+  to override the defaults that are otherwise set in the lib/*
+  files.
+
+  For example, override-defaults may export CINDER_ENABLED_BACKENDS
+  to include the plugin-specific storage backend, and thus override
+  the default LVM-only storage backend for Cinder, as sketched below.
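+
+  A minimal sketch of such a file (the ``foo`` backend and driver
+  names are hypothetical)::
+
+      CINDER_ENABLED_BACKENDS="lvm:lvmdriver-1,foo:foo-1"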
 
 - ``settings`` - a file containing global variables that will be
   sourced very early in the process. This is helpful if other plugins
@@ -38,7 +47,7 @@
 
 - ``plugin.sh`` - the actual plugin. It is executed by devstack at
   well defined points during a ``stack.sh`` run. The plugin.sh
-  internal structure is discussed bellow.
+  internal structure is discussed below.
 
 
 Plugins are registered by adding the following to the localrc section
@@ -56,7 +65,7 @@
 
 An example would be as follows::
 
-  enable_plugin ec2api git://git.openstack.org/stackforge/ec2api
+  enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
 
 plugin.sh contract
 ==================
@@ -202,13 +211,12 @@
 For everyday use, DevStack plugins can exist in any git tree that's
 accessible on the internet. However, when using DevStack plugins in
 the OpenStack gate, they must live in projects in OpenStack's
-gerrit. Both ``openstack`` namespace and ``stackforge`` namespace are
-fine. This allows testing of the plugin as well as provides network
+gerrit. This allows testing of the plugin and provides network
 isolation against upstream git repository failures (which we see often
 enough to be an issue).
 
 Ideally a plugin will be included within the ``devstack`` directory of
-the project they are being tested. For example, the stackforge/ec2-api
+the project being tested. For example, the openstack/ec2-api
 project has its plugin support in its own tree.
 
 However, some times a DevStack plugin might be used solely to
@@ -218,7 +226,7 @@
 integration of SDN controllers (e.g. ovn, OpenDayLight), or
 integration of alternate RPC systems (e.g. zmq, qpid). In these cases
 the best practice is to build a dedicated
-``stackforge/devstack-plugin-FOO`` project.
+``openstack/devstack-plugin-FOO`` project.
 
 To enable a plugin to be used in a gate job, the following lines will
 be needed in your ``jenkins/jobs/<project>.yaml`` definition in
@@ -228,12 +236,12 @@
   # Because we are testing a non standard project, add the
   # our project repository. This makes zuul do the right
   # reference magic for testing changes.
-  export PROJECTS="stackforge/ec2-api $PROJECTS"
+  export PROJECTS="openstack/ec2-api $PROJECTS"
 
   # note the actual url here is somewhat irrelevant because it
   # caches in nodepool, however make it a valid url for
   # documentation purposes.
-  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/stackforge/ec2-api"
+  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
 
 See Also
 ========
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index c33ef44..7cfef1c 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -41,7 +41,6 @@
 unset NOVA_REGION_NAME
 unset NOVA_URL
 unset NOVA_USERNAME
-unset NOVA_VERSION
 
 # Save the known variables for later
 export x_TENANT_NAME=$OS_TENANT_NAME
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index 4a0609a..1d2f4f5 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -41,7 +41,6 @@
 unset NOVA_REGION_NAME
 unset NOVA_URL
 unset NOVA_USERNAME
-unset NOVA_VERSION
 
 for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do
     is_set $i
@@ -101,9 +100,6 @@
             STATUS_EC2="Failed"
             RETURN=1
         fi
-
-        # Clean up side effects
-        unset NOVA_VERSION
     fi
 fi
 
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index a0de4cc..9bcb766 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -122,41 +122,47 @@
 }
 
 function get_image_id {
-    local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
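+    # Declare and assign separately so the exit status of the command
+    # substitution is not masked by `local` (shellcheck SC2155)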
+    local IMAGE_ID
+    IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1)
     die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID"
     echo "$IMAGE_ID"
 }
 
 function get_tenant_id {
     local TENANT_NAME=$1
-    local TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
+    local TENANT_ID
+    TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1`
     die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME"
     echo "$TENANT_ID"
 }
 
 function get_user_id {
     local USER_NAME=$1
-    local USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
+    local USER_ID
+    USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'`
     die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME"
     echo "$USER_ID"
 }
 
 function get_role_id {
     local ROLE_NAME=$1
-    local ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+    local ROLE_ID
+    ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
     die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
     echo "$ROLE_ID"
 }
 
 function get_network_id {
     local NETWORK_NAME="$1"
-    local NETWORK_ID=`neutron net-list -F id  -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+    local NETWORK_ID
+    NETWORK_ID=`neutron net-list -F id  -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
     echo $NETWORK_ID
 }
 
 function get_flavor_id {
     local INSTANCE_TYPE=$1
-    local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
+    local FLAVOR_ID
+    FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'`
     die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE"
     echo "$FLAVOR_ID"
 }
@@ -185,13 +191,15 @@
 
 function remove_tenant {
     local TENANT=$1
-    local TENANT_ID=$(get_tenant_id $TENANT)
+    local TENANT_ID
+    TENANT_ID=$(get_tenant_id $TENANT)
     openstack project delete $TENANT_ID
 }
 
 function remove_user {
     local USER=$1
-    local USER_ID=$(get_user_id $USER)
+    local USER_ID
+    USER_ID=$(get_user_id $USER)
     openstack user delete $USER_ID
 }
 
@@ -221,11 +229,13 @@
     local NET_NAME="${TENANT}-net$NUM"
     local ROUTER_NAME="${TENANT}-router${NUM}"
     source $TOP_DIR/openrc admin admin
-    local TENANT_ID=$(get_tenant_id $TENANT)
+    local TENANT_ID
+    TENANT_ID=$(get_tenant_id $TENANT)
     source $TOP_DIR/openrc $TENANT $TENANT
-    local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+    local NET_ID
+    NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA"
-    neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR
+    neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR
     neutron_debug_admin probe-create --device-owner compute $NET_ID
     source $TOP_DIR/openrc demo demo
 }
@@ -251,7 +261,8 @@
     done
     #TODO (nati) Add multi-nic test
     #TODO (nati) Add public-net test
-    local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
+    local VM_UUID
+    VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \
         --image $(get_image_id) \
         $NIC \
         $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'`
@@ -309,7 +320,8 @@
     local NUM=$2
     local NET_NAME="${TENANT}-net$NUM"
     source $TOP_DIR/openrc admin admin
-    local TENANT_ID=$(get_tenant_id $TENANT)
+    local TENANT_ID
+    TENANT_ID=$(get_tenant_id $TENANT)
     #TODO(nati) comment out until l3-agent merged
     #for res in port subnet net router;do
     for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
diff --git a/exercises/swift.sh b/exercises/swift.sh
index afcede8..4a41e0f 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -2,7 +2,7 @@
 
 # **swift.sh**
 
-# Test swift via the ``swift`` command line from ``python-swiftclient``
+# Test swift via the ``python-openstackclient`` command line
 
 echo "*********************************************************************"
 echo "Begin DevStack Exercise: $0"
@@ -39,26 +39,29 @@
 
 # Container name
 CONTAINER=ex-swift
+OBJECT=/etc/issue
 
 
 # Testing Swift
 # =============
 
 # Check if we have to swift via keystone
-swift stat || die $LINENO "Failure getting status"
+openstack object store account show || die $LINENO "Failure getting account status"
 
 # We start by creating a test container
 openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER"
 
-# add some files into it.
-openstack object create $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER"
+# add a file into it.
+openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER"
 
-# list them
+# list the objects
 openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER"
 
-# And we may want to delete them now that we have tested that
-# everything works.
-swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
+# delete the object first
+openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER"
+
+# delete the container
+openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER"
 
 set +o xtrace
 echo "*********************************************************************"
diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh
deleted file mode 100644
index 3b8e3d5..0000000
--- a/extras.d/50-ironic.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-# ironic.sh - Devstack extras script to install ironic
-
-if is_service_enabled ir-api ir-cond; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/ironic
-    elif [[ "$1" == "stack" && "$2" == "install" ]]; then
-        echo_summary "Installing Ironic"
-        install_ironic
-        install_ironicclient
-        cleanup_ironic
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        echo_summary "Configuring Ironic"
-        configure_ironic
-
-        if is_service_enabled key; then
-            create_ironic_accounts
-        fi
-
-    elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
-        # Initialize ironic
-        init_ironic
-
-        # Start the ironic API and ironic taskmgr components
-        echo_summary "Starting Ironic"
-        start_ironic
-
-        if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then
-            prepare_baremetal_basic_ops
-        fi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        stop_ironic
-        if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then
-            cleanup_baremetal_basic_ops
-        fi
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        cleanup_ironic
-    fi
-fi
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
index 38b901b..cc90128 100644
--- a/extras.d/60-ceph.sh
+++ b/extras.d/60-ceph.sh
@@ -32,7 +32,7 @@
             echo_summary "Configuring Cinder for Ceph"
             configure_ceph_cinder
         fi
-        if is_service_enabled cinder || is_service_enabled nova; then
+        if is_service_enabled n-cpu; then
             # NOTE (leseb): the part below is a requirement to attach Ceph block devices
             echo_summary "Configuring libvirt secret"
             import_libvirt_secret_ceph
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 74f4c60..5e8da99 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -13,7 +13,6 @@
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
-        init_tempest
     elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
         # local.conf Tempest option overrides
         :
diff --git a/extras.d/README.md b/extras.d/README.md
index 7c2e4fe..4cec14b 100644
--- a/extras.d/README.md
+++ b/extras.d/README.md
@@ -14,10 +14,13 @@
 entire `stack.sh` variable space is available.  The scripts are
 sourced with one or more arguments, the first of which defines the hook phase:
 
-    source | stack | unstack | clean
+    override_defaults | source | stack | unstack | clean
 
-    source: always called first in any of the scripts, used to set the
-        initial defaults in a lib/* script or similar
+    override_defaults: always called first in any of the scripts, used to
+        override defaults (if need be) that are otherwise set in lib/* scripts
+
+    source: called by stack.sh. Used to set the initial defaults in a lib/*
+        script or similar
 
     stack: called by stack.sh.  There are four possible values for
         the second arg to distinguish the phase stack.sh is in:
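
A skeleton extras.d script that responds to every phase looks roughly like the sketch below (the pattern follows the removed 50-ironic.sh above); the service and function names are placeholders, not a real plugin:

    # extras.d/55-example.sh -- hypothetical skeleton dispatching on the hook phase
    if is_service_enabled example; then
        if [[ "$1" == "override_defaults" ]]; then
            EXAMPLE_PORT=${EXAMPLE_PORT:-8999}   # override lib/* defaults early
        elif [[ "$1" == "source" ]]; then
            source $TOP_DIR/lib/example          # set initial defaults
        elif [[ "$1" == "stack" && "$2" == "install" ]]; then
            install_example
        elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
            start_example
        elif [[ "$1" == "unstack" ]]; then
            stop_example
        elif [[ "$1" == "clean" ]]; then
            cleanup_example
        fi
    fi
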
diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template
new file mode 100644
index 0000000..ab33c66
--- /dev/null
+++ b/files/apache-heat-api-cfn.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup heat-api-cfn
+    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    AllowEncodedSlashes On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory %HEAT_BIN_DIR%>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
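
The %VAR% tokens in this and the following templates are substituted when the vhost is installed; a hedged sketch of that substitution, where every value and path below is an assumption rather than necessarily what lib/heat does:

    # Hypothetical rendering of the template into a live vhost
    conf=/etc/apache2/sites-available/heat-api-cfn.conf
    sudo cp files/apache-heat-api-cfn.template $conf
    sudo sed -i -e "
        s|%PUBLICPORT%|8000|g;
        s|%USER%|$STACK_USER|g;
        s|%HEAT_BIN_DIR%|/usr/local/bin|g;
        s|%APACHE_NAME%|apache2|g;
        s|%SSLENGINE%||g; s|%SSLCERTFILE%||g; s|%SSLKEYFILE%||g; s|%VIRTUALENV%||g;
    " $conf
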
diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template
new file mode 100644
index 0000000..06c91bb
--- /dev/null
+++ b/files/apache-heat-api-cloudwatch.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup heat-api-cloudwatch
+    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    AllowEncodedSlashes On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory %HEAT_BIN_DIR%>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template
new file mode 100644
index 0000000..4924b39
--- /dev/null
+++ b/files/apache-heat-api.template
@@ -0,0 +1,27 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup heat-api
+    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    AllowEncodedSlashes On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/heat-api.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+
+    <Directory %HEAT_BIN_DIR%>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/files/apache-ironic.template b/files/apache-ironic.template
deleted file mode 100644
index 8864194..0000000
--- a/files/apache-ironic.template
+++ /dev/null
@@ -1,12 +0,0 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
-    DocumentRoot "%HTTPROOT%"
-    <Directory "%HTTPROOT%">
-        Options Indexes FollowSymLinks
-        AllowOverride None
-        Order allow,deny
-        Allow from all
-        Require all granted
-    </Directory>
-</VirtualHost>
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 4d3d2d6..f9fa265 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -2,6 +2,16 @@
 Listen %ADMINPORT%
 LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined
 
+<Directory %KEYSTONE_BIN%>
+    <IfVersion >= 2.4>
+        Require all granted
+    </IfVersion>
+    <IfVersion < 2.4>
+        Order allow,deny
+        Allow from all
+    </IfVersion>
+</Directory>
+
 <VirtualHost *:%PUBLICPORT%>
     WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup keystone-public
@@ -16,16 +26,6 @@
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
-
-    <Directory %KEYSTONE_BIN%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
 </VirtualHost>
 
 <VirtualHost *:%ADMINPORT%>
@@ -42,19 +42,9 @@
     %SSLENGINE%
     %SSLCERTFILE%
     %SSLKEYFILE%
-
-    <Directory %KEYSTONE_BIN%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
 </VirtualHost>
 
-Alias /identity %PUBLICWSGI%
+Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public
 <Location /identity>
     SetHandler wsgi-script
     Options +ExecCGI
@@ -64,7 +54,7 @@
     WSGIPassAuthorization On
 </Location>
 
-Alias /identity_admin %ADMINWSGI%
+Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
 <Location /identity_admin>
     SetHandler wsgi-script
     Options +ExecCGI
diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template
index 4908152..bcf406e 100644
--- a/files/apache-nova-api.template
+++ b/files/apache-nova-api.template
@@ -7,7 +7,7 @@
     WSGIApplicationGroup %{GLOBAL}
     WSGIPassAuthorization On
     <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
+      ErrorLogFormat "%M"
     </IfVersion>
     ErrorLog /var/log/%APACHE_NAME%/nova-api.log
     %SSLENGINE%
diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template
deleted file mode 100644
index 235d958..0000000
--- a/files/apache-nova-ec2-api.template
+++ /dev/null
@@ -1,16 +0,0 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess nova-ec2-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup nova-ec2-api
-    WSGIScriptAlias / %PUBLICWSGI%
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log
-    %SSLENGINE%
-    %SSLCERTFILE%
-    %SSLKEYFILE%
-</VirtualHost>
diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template
new file mode 100644
index 0000000..6231c1c
--- /dev/null
+++ b/files/apache-nova-metadata.template
@@ -0,0 +1,25 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess nova-metadata processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup nova-metadata
+    WSGIScriptAlias / %PUBLICWSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
+
+Alias /metadata %PUBLICWSGI%
+<Location /metadata>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup nova-metadata
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/debs/ceilometer-collector b/files/debs/ceilometer-collector
index f1b692a..d1e9eef 100644
--- a/files/debs/ceilometer-collector
+++ b/files/debs/ceilometer-collector
@@ -1,6 +1,3 @@
-python-pymongo #NOPRIME
-mongodb-server #NOPRIME
 libnspr4-dev
-pkg-config
-libxml2-dev
-libxslt-dev
\ No newline at end of file
+mongodb-server #NOPRIME
+python-pymongo #NOPRIME
diff --git a/files/debs/cinder b/files/debs/cinder
index 51908eb..3595e01 100644
--- a/files/debs/cinder
+++ b/files/debs/cinder
@@ -1,6 +1,5 @@
-tgt # NOPRIME
 lvm2
-qemu-utils
-libpq-dev
 open-iscsi
 open-iscsi-utils # Deprecated since quantal dist:precise
+qemu-utils
+tgt # NOPRIME
diff --git a/files/debs/devlibs b/files/debs/devlibs
deleted file mode 100644
index 0446ceb..0000000
--- a/files/debs/devlibs
+++ /dev/null
@@ -1,7 +0,0 @@
-libffi-dev  # pyOpenSSL
-libmysqlclient-dev  # MySQL-python
-libpq-dev  # psycopg2
-libssl-dev  # pyOpenSSL
-libxml2-dev  # lxml
-libxslt1-dev  # lxml
-python-dev  # pyOpenSSL
diff --git a/files/debs/general b/files/debs/general
index 1460526..1215147 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -1,27 +1,32 @@
+bc
 bridge-utils
-screen
-unzip
-wget
-psmisc
-gcc
+curl
 g++
+gcc
+gettext  # used for compiling message catalogs
 git
 graphviz # needed for docs
+iputils-ping
+libffi-dev # for pyOpenSSL
+libjpeg-dev # Pillow 3.0.0
+libmysqlclient-dev  # MySQL-python
+libpq-dev  # psycopg2
+libssl-dev # for pyOpenSSL
+libxml2-dev  # lxml
+libxslt1-dev  # lxml
+libyaml-dev
 lsof # useful when debugging
+openjdk-7-jre-headless  # NOPRIME
 openssh-server
 openssl
-iputils-ping
-wget
-curl
-tcpdump
-tar
-python-dev
-python2.7
-python-gdbm # needed for testr
-bc
-libyaml-dev
-libffi-dev
-libssl-dev # for pyOpenSSL
-gettext  # used for compiling message catalogs
-openjdk-7-jre-headless  # NOPRIME
 pkg-config
+psmisc
+python2.7
+python-dev
+python-gdbm # needed for testr
+screen
+tar
+tcpdump
+unzip
+wget
+zlib1g-dev
diff --git a/files/debs/glance b/files/debs/glance
deleted file mode 100644
index 37877a8..0000000
--- a/files/debs/glance
+++ /dev/null
@@ -1,6 +0,0 @@
-libmysqlclient-dev
-libpq-dev
-libssl-dev
-libxml2-dev
-libxslt1-dev
-zlib1g-dev
diff --git a/files/debs/ironic b/files/debs/ironic
deleted file mode 100644
index 0a906db..0000000
--- a/files/debs/ironic
+++ /dev/null
@@ -1,19 +0,0 @@
-docker.io
-ipmitool
-iptables
-ipxe
-libguestfs0
-libvirt-bin
-open-iscsi
-openssh-client
-openvswitch-switch
-openvswitch-datapath-dkms
-python-libguestfs
-python-libvirt
-qemu
-qemu-kvm
-qemu-utils
-sgabios
-syslinux
-tftpd-hpa
-xinetd
diff --git a/files/debs/keystone b/files/debs/keystone
index 70a5649..370e4aa 100644
--- a/files/debs/keystone
+++ b/files/debs/keystone
@@ -1,7 +1,5 @@
-python-lxml
-sqlite3
-python-mysqldb
-python-mysql.connector
+libkrb5-dev
 libldap2-dev
 libsasl2-dev
-libkrb5-dev
+python-mysqldb
+sqlite3
diff --git a/files/debs/ldap b/files/debs/ldap
index 26f7aef..aa3a934 100644
--- a/files/debs/ldap
+++ b/files/debs/ldap
@@ -1,3 +1,3 @@
 ldap-utils
-slapd
 python-ldap
+slapd
diff --git a/files/debs/n-cpu b/files/debs/n-cpu
index ffc947a..0da57ee 100644
--- a/files/debs/n-cpu
+++ b/files/debs/n-cpu
@@ -1,8 +1,8 @@
-qemu-utils
+cryptsetup
+genisoimage
 lvm2 # NOPRIME
 open-iscsi
-genisoimage
-sysfsutils
-sg3-utils
 python-guestfs # NOPRIME
-cryptsetup
+qemu-utils
+sg3-utils
+sysfsutils
diff --git a/files/debs/n-novnc b/files/debs/n-novnc
deleted file mode 100644
index c8722b9..0000000
--- a/files/debs/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/debs/neutron b/files/debs/neutron
index b5a457e..e53cc68 100644
--- a/files/debs/neutron
+++ b/files/debs/neutron
@@ -1,18 +1,17 @@
 acl
-ebtables
-iptables
-iputils-ping
-iputils-arping
-libmysqlclient-dev
-mysql-server #NOPRIME
-sudo
-postgresql-server-dev-all
-python-mysqldb
-python-mysql.connector
 dnsmasq-base
 dnsmasq-utils # for dhcp_release only available in dist:precise
+ebtables
+iptables
+iputils-arping
+iputils-ping
+libmysqlclient-dev
+mysql-server #NOPRIME
+postgresql-server-dev-all
+python-mysqldb
 rabbitmq-server # NOPRIME
-sqlite3
-vlan
 radvd # NOPRIME
+sqlite3
+sudo
 uuid-runtime
+vlan
diff --git a/files/debs/nova b/files/debs/nova
index 346b8b3..58dad41 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -1,31 +1,25 @@
+conntrack
+curl
 dnsmasq-base
 dnsmasq-utils # for dhcp_release
-conntrack
-kpartx
-parted
-iputils-arping
-libmysqlclient-dev
-mysql-server # NOPRIME
-python-mysqldb
-python-mysql.connector
-python-lxml # needed for glance which is needed for nova --- this shouldn't be here
-gawk
-iptables
 ebtables
-sqlite3
-sudo
-qemu-kvm # NOPRIME
-qemu # dist:wheezy,jessie NOPRIME
+gawk
+genisoimage # required for config_drive
+iptables
+iputils-arping
+kpartx
+libjs-jquery-tablesorter # Needed for coverage html reports
+libmysqlclient-dev
 libvirt-bin # NOPRIME
 libvirt-dev # NOPRIME
+mysql-server # NOPRIME
+parted
 pm-utils
-libjs-jquery-tablesorter # Needed for coverage html reports
-vlan
-curl
-genisoimage # required for config_drive
+python-mysqldb
+qemu # dist:wheezy,jessie NOPRIME
+qemu-kvm # NOPRIME
 rabbitmq-server # NOPRIME
 socat # used by ajaxterm
-python-libvirt # NOPRIME
-python-libxml2
-python-numpy # used by websockify for spice console
-python-m2crypto
+sqlite3
+sudo
+vlan
diff --git a/files/debs/swift b/files/debs/swift
index 726786e..4b8ac3d 100644
--- a/files/debs/swift
+++ b/files/debs/swift
@@ -1,4 +1,5 @@
 curl
+liberasurecode-dev
 make
 memcached
 sqlite3
diff --git a/files/debs/tempest b/files/debs/tempest
deleted file mode 100644
index bb09529..0000000
--- a/files/debs/tempest
+++ /dev/null
@@ -1,2 +0,0 @@
-libxml2-dev
-libxslt1-dev
diff --git a/files/debs/trove b/files/debs/trove
deleted file mode 100644
index 96f8f29..0000000
--- a/files/debs/trove
+++ /dev/null
@@ -1 +0,0 @@
-libxslt1-dev
diff --git a/files/debs/zookeeper b/files/debs/zookeeper
new file mode 100644
index 0000000..f41b559
--- /dev/null
+++ b/files/debs/zookeeper
@@ -0,0 +1 @@
+zookeeperd
diff --git a/files/ebtables.workaround b/files/ebtables.workaround
new file mode 100644
index 0000000..c8af51f
--- /dev/null
+++ b/files/ebtables.workaround
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+#
+# This is a terrible, terrible, truly terrible workaround for
+# environments that have libvirt < 1.2.11. ebtables requires that you
+# specifically tell it you would like to not race and get punched in
+# the face when 2 run at the same time with a --concurrent flag.
+
+flock -w 300 /var/lock/ebtables.nova /sbin/ebtables.real "$@"
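
The patch does not show how the wrapper gets wired in; one plausible installation, under the assumption that the real binary is diverted aside first, would be:

    # Hypothetical installation of the workaround (paths assumed)
    sudo mv /sbin/ebtables /sbin/ebtables.real
    sudo install -m 0755 files/ebtables.workaround /sbin/ebtables
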
diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector
index 5e4dfcc..fc75ffa 100644
--- a/files/rpms-suse/ceilometer-collector
+++ b/files/rpms-suse/ceilometer-collector
@@ -1,3 +1,3 @@
-# Not available in openSUSE main repositories, but can be fetched from OBS
 # (devel:languages:python and server:database projects)
 mongodb
+# Not available in openSUSE main repositories, but can be fetched from OBS
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
index 8d46500..8c4955d 100644
--- a/files/rpms-suse/ceph
+++ b/files/rpms-suse/ceph
@@ -1,3 +1,3 @@
 ceph    # NOPRIME
-xfsprogs
 lsb
+xfsprogs
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
index 3fd03cc..189a232 100644
--- a/files/rpms-suse/cinder
+++ b/files/rpms-suse/cinder
@@ -1,6 +1,4 @@
 lvm2
-tgt # NOPRIME
-qemu-tools
-python-devel
-postgresql-devel
 open-iscsi
+qemu-tools
+tgt # NOPRIME
diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs
deleted file mode 100644
index 54d13a3..0000000
--- a/files/rpms-suse/devlibs
+++ /dev/null
@@ -1,6 +0,0 @@
-libffi-devel  # pyOpenSSL
-libopenssl-devel  # pyOpenSSL
-libxslt-devel  # lxml
-postgresql-devel  # psycopg2
-libmysqlclient-devel # MySQL-python
-python-devel  # pyOpenSSL
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 42756d8..34a2955 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -8,16 +8,23 @@
 git-core
 graphviz # docs
 iputils
+libffi-devel  # pyOpenSSL
+libjpeg8-devel # Pillow 3.0.0
+libmysqlclient-devel # MySQL-python
 libopenssl-devel # to rebuild pyOpenSSL if needed
+libxslt-devel  # lxml
 lsof # useful when debugging
 make
+net-tools
 openssh
 openssl
+postgresql-devel  # psycopg2
 psmisc
 python-cmd2 # dist:opensuse-12.3
+python-devel  # pyOpenSSL
 screen
 tar
 tcpdump
 unzip
 wget
-net-tools
+zlib-devel
diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance
deleted file mode 100644
index bf512de..0000000
--- a/files/rpms-suse/glance
+++ /dev/null
@@ -1 +0,0 @@
-python-devel
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
index 77f7c34..753ea76 100644
--- a/files/rpms-suse/horizon
+++ b/files/rpms-suse/horizon
@@ -1,2 +1,2 @@
-apache2  # NOPRIME
 apache2-mod_wsgi  # NOPRIME
+apache2  # NOPRIME
diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone
index c838b41..46832c7 100644
--- a/files/rpms-suse/keystone
+++ b/files/rpms-suse/keystone
@@ -1,4 +1,3 @@
 cyrus-sasl-devel
 openldap2-devel
-python-devel
 sqlite3
diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api
index 6f59e60..af5ac2f 100644
--- a/files/rpms-suse/n-api
+++ b/files/rpms-suse/n-api
@@ -1,2 +1,2 @@
-python-dateutil
 fping
+python-dateutil
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index b3a468d..29bd31b 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,7 +1,7 @@
-# Stuff for diablo volumes
+cryptsetup
 genisoimage
 lvm2
 open-iscsi
-sysfsutils
 sg3_utils
-cryptsetup
+# Stuff for diablo volumes
+sysfsutils
diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron
index 1339799..e9abc6e 100644
--- a/files/rpms-suse/neutron
+++ b/files/rpms-suse/neutron
@@ -5,9 +5,8 @@
 iptables
 iputils
 mariadb # NOPRIME
-postgresql-devel
 rabbitmq-server # NOPRIME
+radvd # NOPRIME
 sqlite3
 sudo
 vlan
-radvd # NOPRIME
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 039456f..ae115d2 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,7 +1,7 @@
+conntrack-tools
 curl
 dnsmasq
 dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
-conntrack-tools
 ebtables
 gawk
 genisoimage # required for config_drive
@@ -9,14 +9,13 @@
 iputils
 kpartx
 kvm # NOPRIME
-# qemu as fallback if kvm cannot be used
-qemu # NOPRIME
 libvirt # NOPRIME
 libvirt-python # NOPRIME
 mariadb # NOPRIME
 parted
 polkit
-python-devel
+# qemu as fallback if kvm cannot be used
+qemu # NOPRIME
 rabbitmq-server # NOPRIME
 socat
 sqlite3
diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch
index edfb4d2..53f8bb2 100644
--- a/files/rpms-suse/openvswitch
+++ b/files/rpms-suse/openvswitch
@@ -1,3 +1,2 @@
 openvswitch
 openvswitch-switch
-
diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift
index 6a824f9..3663b98 100644
--- a/files/rpms-suse/swift
+++ b/files/rpms-suse/swift
@@ -1,6 +1,6 @@
 curl
+liberasurecode-devel
 memcached
-python-devel
 sqlite3
 xfsprogs
 xinetd
diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector
index b139ed2..a8b8118 100644
--- a/files/rpms/ceilometer-collector
+++ b/files/rpms/ceilometer-collector
@@ -1,3 +1,3 @@
-selinux-policy-targeted
-mongodb-server #NOPRIME
 mongodb # NOPRIME
+mongodb-server #NOPRIME
+selinux-policy-targeted
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 5483735..64befc5 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
 ceph    # NOPRIME
-xfsprogs
 redhat-lsb-core
+xfsprogs
diff --git a/files/rpms/cinder b/files/rpms/cinder
index a88503b..0274642 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,5 +1,4 @@
-lvm2
-scsi-target-utils # NOPRIME
-qemu-img
-postgresql-devel
 iscsi-initiator-utils
+lvm2
+qemu-img
+scsi-target-utils # NOPRIME
diff --git a/files/rpms/devlibs b/files/rpms/devlibs
deleted file mode 100644
index 385ed3b..0000000
--- a/files/rpms/devlibs
+++ /dev/null
@@ -1,8 +0,0 @@
-libffi-devel  # pyOpenSSL
-libxml2-devel  # lxml
-libxslt-devel  # lxml
-mariadb-devel  # MySQL-python
-openssl-devel  # pyOpenSSL
-postgresql-devel  # psycopg2
-python-devel  # pyOpenSSL
-redhat-rpm-config # MySQL-python rhbz-1195207 f21
diff --git a/files/rpms/dstat b/files/rpms/dstat
index 8a8f8fe..2b643b8 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1 +1 @@
-dstat
\ No newline at end of file
+dstat
diff --git a/files/rpms/general b/files/rpms/general
index c3f3de8..5bc87b6 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -1,31 +1,36 @@
+bc
 bridge-utils
 curl
 dbus
 euca2ools # only for testing client
 gcc
 gcc-c++
+gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
+iptables-services  # NOPRIME f22,f23
+java-1.7.0-openjdk-headless  # NOPRIME rhel7
+java-1.8.0-openjdk-headless  # NOPRIME f22,f23
+libffi-devel
+libjpeg-turbo-devel # Pillow 3.0.0
+libxml2-devel # lxml
+libxslt-devel # lxml
+libyaml-devel
+mariadb-devel  # MySQL-python
+net-tools
 openssh-server
 openssl
 openssl-devel # to rebuild pyOpenSSL if needed
-libffi-devel
-libxml2-devel
-libxslt-devel
 pkgconfig
+postgresql-devel  # psycopg2
 psmisc
+pyOpenSSL # version in pip uses too much memory
 python-devel
+redhat-rpm-config # MySQL-python rhbz-1195207
 screen
 tar
 tcpdump
 unzip
 wget
 which
-bc
-libyaml-devel
-gettext  # used for compiling message catalogs
-net-tools
-java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f21,f22
-pyOpenSSL # version in pip uses too much memory
-iptables-services  # NOPRIME f21,f22
+zlib-devel
diff --git a/files/rpms/glance b/files/rpms/glance
deleted file mode 100644
index 479194f..0000000
--- a/files/rpms/glance
+++ /dev/null
@@ -1,6 +0,0 @@
-libxml2-devel
-libxslt-devel
-mysql-devel
-openssl-devel
-postgresql-devel
-zlib-devel
diff --git a/files/rpms/horizon b/files/rpms/horizon
index b2cf0de..aeb2cb5 100644
--- a/files/rpms/horizon
+++ b/files/rpms/horizon
@@ -1,5 +1,5 @@
 Django
 httpd # NOPRIME
 mod_wsgi  # NOPRIME
-pyxattr
 pcre-devel  # pyScss
+pyxattr
diff --git a/files/rpms/ironic b/files/rpms/ironic
deleted file mode 100644
index 2bf8bb3..0000000
--- a/files/rpms/ironic
+++ /dev/null
@@ -1,14 +0,0 @@
-docker-io
-ipmitool
-iptables
-ipxe-bootimgs
-libguestfs
-libvirt
-libvirt-python
-net-tools
-openssh-clients
-openvswitch
-sgabios
-syslinux
-tftp-server
-xinetd
diff --git a/files/rpms/keystone b/files/rpms/keystone
index 8074119..c01c261 100644
--- a/files/rpms/keystone
+++ b/files/rpms/keystone
@@ -1,4 +1,3 @@
-MySQL-python
-libxslt-devel
-sqlite
 mod_ssl
+MySQL-python
+sqlite
diff --git a/files/rpms/ldap b/files/rpms/ldap
index d89c4cf..d5b8fa4 100644
--- a/files/rpms/ldap
+++ b/files/rpms/ldap
@@ -1,2 +1,2 @@
-openldap-servers
 openldap-clients
+openldap-servers
diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu
index 81278b3..7773b04 100644
--- a/files/rpms/n-cpu
+++ b/files/rpms/n-cpu
@@ -1,7 +1,7 @@
-# Stuff for diablo volumes
+cryptsetup
+genisoimage
 iscsi-initiator-utils
 lvm2
-genisoimage
-sysfsutils
 sg3_utils
-cryptsetup
+# Stuff for diablo volumes
+sysfsutils
diff --git a/files/rpms/neutron b/files/rpms/neutron
index 29851be..2e49a0c 100644
--- a/files/rpms/neutron
+++ b/files/rpms/neutron
@@ -1,16 +1,14 @@
-MySQL-python
 acl
 dnsmasq # for q-dhcp
 dnsmasq-utils # for dhcp_release
 ebtables
 iptables
 iputils
-mysql-connector-python
 mysql-devel
+MySQL-python
 mysql-server # NOPRIME
 openvswitch # NOPRIME
-postgresql-devel
 rabbitmq-server # NOPRIME
+radvd # NOPRIME
 sqlite
 sudo
-radvd # NOPRIME
diff --git a/files/rpms/nova b/files/rpms/nova
index 6eeb623..0312e85 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,27 +1,27 @@
-MySQL-python
+conntrack-tools
 curl
 dnsmasq # for nova-network
 dnsmasq-utils # for dhcp_release
-conntrack-tools
 ebtables
 gawk
 genisoimage # required for config_drive
 iptables
 iputils
+kernel-modules # dist:f22,f23
 kpartx
 kvm # NOPRIME
-qemu-kvm # NOPRIME
 libvirt-bin # NOPRIME
 libvirt-devel # NOPRIME
 libvirt-python # NOPRIME
 libxml2-python
-numpy # needed by websockify for spice console
 m2crypto
-mysql-connector-python
 mysql-devel
+MySQL-python
 mysql-server # NOPRIME
+numpy # needed by websockify for spice console
 parted
 polkit
+qemu-kvm # NOPRIME
 rabbitmq-server # NOPRIME
 sqlite
 sudo
diff --git a/files/rpms/swift b/files/rpms/swift
index 1bf57cc..46dc59d 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,7 +1,8 @@
 curl
+liberasurecode-devel
 memcached
 pyxattr
+rsync-daemon # dist:f22,f23
 sqlite
 xfsprogs
 xinetd
-rsync-daemon # dist:f22,f23
diff --git a/files/rpms/tempest b/files/rpms/tempest
deleted file mode 100644
index e7bbd43..0000000
--- a/files/rpms/tempest
+++ /dev/null
@@ -1 +0,0 @@
-libxslt-devel
diff --git a/files/rpms/trove b/files/rpms/trove
deleted file mode 100644
index e7bbd43..0000000
--- a/files/rpms/trove
+++ /dev/null
@@ -1 +0,0 @@
-libxslt-devel
diff --git a/files/rpms/zookeeper b/files/rpms/zookeeper
new file mode 100644
index 0000000..1bfac53
--- /dev/null
+++ b/files/rpms/zookeeper
@@ -0,0 +1 @@
+zookeeper
diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt
deleted file mode 100644
index b9a55b4..0000000
--- a/files/venv-requirements.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-# Once we can prebuild wheels before a devstack run, uncomment the skipped libraries
-cryptography
-# lxml # still install from from packages
-# netifaces # still install from packages
-#numpy    # slowest wheel by far, stop building until we are actually using the output
-posix-ipc
-# psycopg # still install from packages
-pycrypto
-pyOpenSSL
-PyYAML
-xattr
diff --git a/files/zookeeper/environment b/files/zookeeper/environment
new file mode 100644
index 0000000..afa2d2f
--- /dev/null
+++ b/files/zookeeper/environment
@@ -0,0 +1,36 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Modified from http://packages.ubuntu.com/saucy/zookeeperd
+NAME=zookeeper
+ZOOCFGDIR=/etc/zookeeper/conf
+
+# It seems that log4j requires the log4j.properties file to be in the classpath
+CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar"
+
+ZOOCFG="$ZOOCFGDIR/zoo.cfg"
+ZOO_LOG_DIR=/var/log/zookeeper
+USER=$NAME
+GROUP=$NAME
+PIDDIR=/var/run/$NAME
+PIDFILE=$PIDDIR/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+JAVA=/usr/bin/java
+ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ZOO_LOG4J_PROP="INFO,ROLLINGFILE"
+JMXLOCALONLY=false
+JAVA_OPTS=""
diff --git a/files/zookeeper/log4j.properties b/files/zookeeper/log4j.properties
new file mode 100644
index 0000000..6c45a4a
--- /dev/null
+++ b/files/zookeeper/log4j.properties
@@ -0,0 +1,69 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# From http://packages.ubuntu.com/saucy/zookeeperd
+
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+"
+
+log4j.rootLogger=${zookeeper.root.logger}
+
+# Example: console appender only
+# log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=WARN
+log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n
diff --git a/files/zookeeper/myid b/files/zookeeper/myid
new file mode 100644
index 0000000..c227083
--- /dev/null
+++ b/files/zookeeper/myid
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/files/zookeeper/zoo.cfg b/files/zookeeper/zoo.cfg
new file mode 100644
index 0000000..b8f5582
--- /dev/null
+++ b/files/zookeeper/zoo.cfg
@@ -0,0 +1,74 @@
+#
+# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html
+
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+dataDir=/var/lib/zookeeper
+# Place the dataLogDir on a separate physical disk for better performance
+# dataLogDir=/disk2/zookeeper
+
+# the port at which the clients will connect
+clientPort=2181
+
+# Maximum number of clients that can connect from one client
+maxClientCnxns=60
+
+# specify all zookeeper servers
+# The first port is used by followers to connect to the leader
+# The second one is used for leader election
+
+server.0=127.0.0.1:2888:3888
+
+# To avoid seeks ZooKeeper allocates space in the transaction log file in
+# blocks of preAllocSize kilobytes. The default block size is 64M. One reason
+# for changing the size of the blocks is to reduce the block size if snapshots
+# are taken more often. (Also, see snapCount).
+#preAllocSize=65536
+
+# Clients can submit requests faster than ZooKeeper can process them,
+# especially if there are a lot of clients. To prevent ZooKeeper from running
+# out of memory due to queued requests, ZooKeeper will throttle clients so that
+# there is no more than globalOutstandingLimit outstanding requests in the
+# system. The default limit is 1,000. ZooKeeper logs transactions to a
+# transaction log. After snapCount transactions are written to a log file a
+# snapshot is started and a new transaction log file is started. The default
+# snapCount is 10,000.
+#snapCount=1000
+
+# If this option is defined, requests will be logged to a trace file named
+# traceFile.year.month.day.
+#traceFile=
+
+# Leader accepts client connections. Default value is "yes". The leader machine
+# coordinates updates. For higher update throughput at the slight expense of
+# read throughput the leader can be configured to not accept clients and focus
+# on coordination.
+#leaderServes=yes
+
+# Autopurge every hour to avoid using lots of disk in bursts
+# Order of the next 2 properties matters.
+# autopurge.snapRetainCount must be before autopurge.purgeInterval.
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=1
\ No newline at end of file
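
server.0 together with a myid of 0 describes the single-node ensemble DevStack sets up; growing it to three members just means one server.N line per host and a matching per-host id. A sketch with assumed addresses:

    # Hypothetical three-node ensemble: one server.N line per member in zoo.cfg
    printf '%s\n' \
        'server.0=192.0.2.10:2888:3888' \
        'server.1=192.0.2.11:2888:3888' \
        'server.2=192.0.2.12:2888:3888' | sudo tee -a /etc/zookeeper/conf/zoo.cfg
    # ...and give each host its own id, e.g. on the second host:
    echo 1 | sudo tee /var/lib/zookeeper/myid
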
diff --git a/functions b/functions
index ff95c89..9495710 100644
--- a/functions
+++ b/functions
@@ -22,7 +22,7 @@
 source ${FUNC_DIR}/inc/rootwrap
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_FUNCTIONS=$(set +o | grep xtrace)
 set +o xtrace
 
 # Check if a function already exists
@@ -264,7 +264,8 @@
             ;;
         *.img)
             image_name=$(basename "$image" ".img")
-            local format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
+            local format
+            format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }')
             if [[ ",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then
                 disk_format=$format
             else
@@ -341,7 +342,7 @@
         # No backends registered means this is likely called from ``localrc``
         # This is now deprecated usage
         DATABASE_TYPE=$1
-        DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc\n"
+        deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc"
     else
         # This should no longer get called...here for posterity
         use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1
@@ -356,7 +357,9 @@
 function wait_for_service {
     local timeout=$1
     local url=$2
+    time_start "wait_for_service"
     timeout $timeout sh -c "while ! $CURL_GET -k --noproxy '*' -s $url >/dev/null; do sleep 1; done"
+    time_stop "wait_for_service"
 }
 
 
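
The time_start/time_stop pairs appearing throughout this change feed DevStack's cumulative timing report and leave call sites untouched; a typical caller, with an illustrative URL:

    # Block for up to 60 seconds until keystone answers
    wait_for_service 60 $KEYSTONE_AUTH_URI || die $LINENO "keystone did not start"
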
@@ -405,10 +408,11 @@
     local vm_id=$1
     local network_name=$2
     local nova_result="$(nova show $vm_id)"
-    local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
+    local ip
+    ip=$(echo "$nova_result" | grep "$network_name" | get_field 2)
     if [[ $ip = "" ]];then
         echo "$nova_result"
-        die $LINENO "[Fail] Coudn't get ipaddress of VM"
+        die $LINENO "[Fail] Couldn't get ipaddress of VM"
     fi
     echo $ip
 }
@@ -455,7 +459,8 @@
     # homedir permissions on RHEL and common practice of making DEST in
     # the stack user's homedir.
 
-    local real_path=$(readlink -f $1)
+    local real_path
+    real_path=$(readlink -f $1)
     local rebuilt_path=""
     for i in $(echo ${real_path} | tr "/" " "); do
         rebuilt_path=$rebuilt_path"/"$i
@@ -600,7 +605,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_FUNCTIONS
 
 # Local variables:
 # mode: shell-script
diff --git a/functions-common b/functions-common
index a7fec41..a150dc5 100644
--- a/functions-common
+++ b/functions-common
@@ -32,7 +32,7 @@
 #
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_FUNCTIONS_COMMON=$(set +o | grep xtrace)
 set +o xtrace
 
 # ensure we don't re-source this in the same environment
@@ -76,9 +76,11 @@
     # The location is a variable to allow for easier refactoring later to make it
     # overridable. There is currently no usecase where doing so makes sense, so
     # it's not currently configurable.
-    CLOUDS_YAML=~/.config/openstack/clouds.yaml
 
-    mkdir -p $(dirname $CLOUDS_YAML)
+    CLOUDS_YAML=/etc/openstack/clouds.yaml
+
+    sudo mkdir -p $(dirname $CLOUDS_YAML)
+    sudo chown -R $STACK_USER /etc/openstack
 
     CA_CERT_ARG=''
     if [ -f "$SSL_BUNDLE_FILE" ]; then
@@ -88,9 +90,9 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version $IDENTITY_API_VERSION \
+        --os-identity-api-version 3 \
         $CA_CERT_ARG \
-        --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
+        --os-auth-url $KEYSTONE_AUTH_URI \
         --os-username demo \
         --os-password $ADMIN_PASSWORD \
         --os-project-name demo
@@ -98,23 +100,35 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack-admin \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version $IDENTITY_API_VERSION \
+        --os-identity-api-version 3 \
         $CA_CERT_ARG \
-        --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
+        --os-auth-url $KEYSTONE_AUTH_URI \
         --os-username admin \
         --os-password $ADMIN_PASSWORD \
         --os-project-name admin
 }
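
The two invocations above leave behind a clouds.yaml along the lines of the sketch below; the host, password, and exact layout are assumptions, since the file is generated by the os-client-config tooling:

    # Peek at the generated config (content is illustrative)
    $ cat /etc/openstack/clouds.yaml
    clouds:
      devstack:
        auth:
          auth_url: http://203.0.113.5/identity
          username: demo
          password: secret
          project_name: demo
        identity_api_version: '3'
        region_name: RegionOne
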
 
-# Normalize config values to True or False
-# Accepts as False: 0 no No NO false False FALSE
-# Accepts as True: 1 yes Yes YES true True TRUE
-# VAR=$(trueorfalse default-value test-value)
+# trueorfalse <True|False> <VAR>
+#
+# Normalize config-value provided in variable VAR to either "True" or
+# "False".  If VAR is unset (i.e. $VAR evaluates as empty), the value
+# of the second argument will be used as the default value.
+#
+#  Accepts as False: 0 no  No  NO  false False FALSE
+#  Accepts as True:  1 yes Yes YES true  True  TRUE
+#
+# usage:
+#  VAL=$(trueorfalse False VAL)
 function trueorfalse {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
     local default=$1
+
+    if [ -z "$2" ]; then
+        die $LINENO "variable to normalize required"
+    fi
     local testval=${!2:-}
 
     case "$testval" in
@@ -139,7 +153,8 @@
 # backtrace level
 function backtrace {
     local level=$1
-    local deep=$((${#BASH_SOURCE[@]} - 1))
+    local deep
+    deep=$((${#BASH_SOURCE[@]} - 1))
     echo "[Call Trace]"
     while [ $level -le $deep ]; do
         echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}"
@@ -169,7 +184,8 @@
 # die_if_not_set $LINENO env-var "message"
 function die_if_not_set {
     local exitcode=$?
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local line=$1; shift
     local evar=$1; shift
@@ -179,11 +195,18 @@
     $xtrace
 }
 
+function deprecated {
+    local text=$1
+    DEPRECATED_TEXT+="\n$text"
+    echo "WARNING: $text"
+}
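
The new helper both warns immediately and appends to DEPRECATED_TEXT for the end-of-run summary; a call site looks like:

    # Illustrative message, not one from this patch
    deprecated "EXAMPLE_OPT is deprecated, please set NEW_OPT instead"
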
+
 # Prints line number and "message" in error format
 # err $LINENO "message"
 function err {
     local exitcode=$?
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2"
     echo $msg 1>&2;
@@ -200,7 +223,8 @@
 # err_if_not_set $LINENO env-var "message"
 function err_if_not_set {
     local exitcode=$?
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local line=$1; shift
     local evar=$1; shift
@@ -236,7 +260,8 @@
 # warn $LINENO "message"
 function warn {
     local exitcode=$?
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2"
     echo $msg
@@ -472,7 +497,8 @@
     local git_remote=$1
     local git_dest=$2
     local git_ref=$3
-    local orig_dir=$(pwd)
+    local orig_dir
+    orig_dir=$(pwd)
     local git_clone_flags=""
 
     RECLONE=$(trueorfalse False RECLONE)
@@ -571,6 +597,7 @@
         timeout=${GIT_TIMEOUT}
     fi
 
+    time_start "git_timed"
     until timeout -s SIGINT ${timeout} git "$@"; do
         # 124 is timeout(1)'s special return code when it reached the
         # timeout; otherwise assume fatal failure
@@ -585,6 +612,7 @@
         fi
         sleep 5
     done
+    time_stop "git_timed"
 }
 
 # git update using reference as a branch.
@@ -636,7 +664,8 @@
         host_ip=""
         # Find the interface used for the default route
         host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
-        local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/");  print parts[1]}')
+        local host_ips
+        host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/");  print parts[1]}')
         local ip
         for ip in $host_ips; do
             # Attempt to filter out IP addresses that are part of the fixed and
@@ -685,7 +714,8 @@
 # copy over a default policy.json and policy.d for projects
 function install_default_policy {
     local project=$1
-    local project_uc=$(echo $1|tr a-z A-Z)
+    local project_uc
+    project_uc=$(echo $1|tr a-z A-Z)
     local conf_dir="${project_uc}_CONF_DIR"
     # eval conf dir to get the variable
     conf_dir="${!conf_dir}"
@@ -718,7 +748,8 @@
 
     # Add a terminating comma to policy lines without one
     # Remove the closing '}' and all lines following to the end-of-file
-    local tmpfile=$(mktemp)
+    local tmpfile
+    tmpfile=$(mktemp)
     uniq ${policy_file} | sed -e '
         s/]$/],/
         /^[}]/,$d
@@ -735,16 +766,13 @@
 # Usage: get_or_create_domain <name> <description>
 function get_or_create_domain {
     local domain_id
-    local os_url="$KEYSTONE_SERVICE_URI_V3"
     # Gets domain id
     domain_id=$(
         # Gets domain id
-        openstack --os-token=$OS_TOKEN --os-url=$os_url \
-            --os-identity-api-version=3 domain show $1 \
+        openstack domain show $1 \
             -f value -c id 2>/dev/null ||
         # Creates new domain
-        openstack --os-token=$OS_TOKEN --os-url=$os_url \
-            --os-identity-api-version=3 domain create $1 \
+        openstack domain create $1 \
             --description "$2" \
             -f value -c id
     )
@@ -755,13 +783,11 @@
 # Usage: get_or_create_group <groupname> <domain> [<description>]
 function get_or_create_group {
     local desc="${3:-}"
-    local os_url="$KEYSTONE_SERVICE_URI_V3"
     local group_id
     # Gets group id
     group_id=$(
         # Creates new group with --or-show
-        openstack --os-token=$OS_TOKEN --os-url=$os_url \
-            --os-identity-api-version=3 group create $1 \
+        openstack group create $1 \
             --domain $2 --description "$desc" --or-show \
             -f value -c id
     )
@@ -783,8 +809,6 @@
         openstack user create \
             $1 \
             --password "$2" \
-            --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             --domain=$3 \
             $email \
             --or-show \
@@ -799,9 +823,7 @@
     local project_id
     project_id=$(
         # Creates new project with --or-show
-        openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
-            project create $1 \
+        openstack project create $1 \
             --domain=$2 \
             --or-show -f value -c id
     )
@@ -815,8 +837,6 @@
     role_id=$(
         # Creates role with --or-show
         openstack role create $1 \
-            --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             --or-show -f value -c id
     )
     echo $role_id
@@ -829,8 +849,6 @@
     # Gets user role id
     user_role_id=$(openstack role list \
         --user $2 \
-        --os-url=$KEYSTONE_SERVICE_URI_V3 \
-        --os-identity-api-version=3 \
         --column "ID" \
         --project $3 \
         --column "Name" \
@@ -839,13 +857,9 @@
         # Adds role to user and get it
         openstack role add $1 \
             --user $2 \
-            --project $3 \
-            --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3
+            --project $3
         user_role_id=$(openstack role list \
             --user $2 \
-            --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             --column "ID" \
             --project $3 \
             --column "Name" \
@@ -854,27 +868,47 @@
     echo $user_role_id
 }
 
+# Gets or adds user role to domain
+# Usage: get_or_add_user_domain_role <role> <user> <domain>
+function get_or_add_user_domain_role {
+    local user_role_id
+    # Gets user role id
+    user_role_id=$(openstack role list \
+        --user $2 \
+        --column "ID" \
+        --domain $3 \
+        --column "Name" \
+        | grep " $1 " | get_field 1)
+    if [[ -z "$user_role_id" ]]; then
+        # Adds role to user and get it
+        openstack role add $1 \
+            --user $2 \
+            --domain $3
+        user_role_id=$(openstack role list \
+            --user $2 \
+            --column "ID" \
+            --domain $3 \
+            --column "Name" \
+            | grep " $1 " | get_field 1)
+    fi
+    echo $user_role_id
+}
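
Usage mirrors the project-scoped helper above it; for example, granting a role on a domain (all names illustrative):

    # Give user 'demo' the 'Member' role on domain 'default'
    get_or_add_user_domain_role Member demo default
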
+
 # Gets or adds group role to project
 # Usage: get_or_add_group_project_role <role> <group> <project>
 function get_or_add_group_project_role {
     local group_role_id
     # Gets group role id
     group_role_id=$(openstack role list \
-        --os-url=$KEYSTONE_SERVICE_URI_V3 \
-        --os-identity-api-version=3 \
         --group $2 \
         --project $3 \
         -c "ID" -f value)
     if [[ -z "$group_role_id" ]]; then
         # Adds role to group and get it
         openstack role add $1 \
-            --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             --group $2 \
             --project $3
         group_role_id=$(openstack role list \
-            --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             --group $2 \
             --project $3 \
             -c "ID" -f value)
@@ -892,8 +926,6 @@
         openstack service show $2 -f value -c id 2>/dev/null ||
         # Creates new service if not exists
         openstack service create \
-            --os-url $KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             $2 \
             --name $1 \
             --description="$3" \
@@ -906,23 +938,14 @@
 # Usage: _get_or_create_endpoint_with_interface <service> <interface> <url> <region>
 function _get_or_create_endpoint_with_interface {
     local endpoint_id
-    # TODO(dgonzalez): The check of the region name, as done in the grep
-    # statement below, exists only because keystone does currently
-    # not allow filtering the region name when listing endpoints. If keystone
-    # gets support for this, the check for the region name can be removed.
-    # Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772
     endpoint_id=$(openstack endpoint list \
-        --os-url $KEYSTONE_SERVICE_URI_V3 \
-        --os-identity-api-version=3 \
         --service $1 \
         --interface $2 \
         --region $4 \
-        -c ID -c Region -f value | grep $4 | cut -f 1 -d " ")
+        -c ID -f value)
     if [[ -z "$endpoint_id" ]]; then
         # Creates new endpoint
         endpoint_id=$(openstack endpoint create \
-            --os-url $KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 \
             $1 $2 $3 --region $4 -f value -c id)
     fi
 
@@ -940,7 +963,8 @@
     # scenarios currently that use the returned id. Ideally this behaviour
     # should be pushed out to the service setups and let them create the
     # endpoints they need.
-    local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
+    local public_id
+    public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2)
     _get_or_create_endpoint_with_interface $1 admin $4 $2
     _get_or_create_endpoint_with_interface $1 internal $5 $2
 
@@ -958,6 +982,15 @@
             -c URL -f value)
 }
 
+# check if we are using ironic with hardware
+# TODO(jroll) this is a kludge left behind when ripping ironic code
+# out of tree, as it is used by nova and neutron.
+# figure out a way to refactor nova/neutron code to eliminate this
+function is_ironic_hardware {
+    is_service_enabled ironic && [[ -n "${IRONIC_DEPLOY_DRIVER##*_ssh}" ]] && return 0
+    return 1
+}
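
The parameter expansion is the subtle part: ${IRONIC_DEPLOY_DRIVER##*_ssh} deletes the longest prefix matching *_ssh, so it expands to the empty string exactly when the driver name ends in _ssh. A quick illustration with assumed driver names:

    # Suffix test used above
    IRONIC_DEPLOY_DRIVER=pxe_ssh
    echo "${IRONIC_DEPLOY_DRIVER##*_ssh}"   # empty -> ssh/virtual, not hardware
    IRONIC_DEPLOY_DRIVER=agent_ipmitool
    echo "${IRONIC_DEPLOY_DRIVER##*_ssh}"   # unchanged -> real hardware
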
+
 
 # Package Functions
 # =================
@@ -982,23 +1015,60 @@
     echo "$pkg_dir"
 }
 
+# Wrapper for ``apt-get update`` to try multiple times on the update
+# to address bad package mirrors (which happen all the time).
+function apt_get_update {
+    # only do this once per run
+    if [[ "$REPOS_UPDATED" == "True" && "$RETRY_UPDATE" != "True" ]]; then
+        return
+    fi
+
+    # bail if we are offline
+    [[ "$OFFLINE" = "True" ]] && return
+
+    local sudo="sudo"
+    [[ "$(id -u)" = "0" ]] && sudo="env"
+
+    # time all the apt operations
+    time_start "apt-get-update"
+
+    local proxies="http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} no_proxy=${no_proxy:-} "
+    local update_cmd="$sudo $proxies apt-get update"
+    if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then
+        die $LINENO "Failed to update apt repos, we're dead now"
+    fi
+
+    REPOS_UPDATED=True
+    # stop the clock
+    time_stop "apt-get-update"
+}
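
The function is memoized through REPOS_UPDATED, so repeated callers (such as the update_package_repo hunk below) invoke it unconditionally at no extra cost; setting RETRY_UPDATE forces another pass, e.g.:

    # Force a refresh even if the repos were already updated this run
    RETRY_UPDATE=True apt_get_update
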
+
 # Wrapper for ``apt-get`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``*_proxy``
 # apt_get operation package [package ...]
 function apt_get {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace result
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
     [[ "$OFFLINE" = "True" || -z "$@" ]] && return
     local sudo="sudo"
     [[ "$(id -u)" = "0" ]] && sudo="env"
 
+    # time all the apt operations
+    time_start "apt-get"
+
     $xtrace
 
     $sudo DEBIAN_FRONTEND=noninteractive \
         http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
         no_proxy=${no_proxy:-} \
         apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@"
+    result=$?
+
+    # stop the clock
+    time_stop "apt-get"
+    return $result
 }
 
 function _parse_package_files {
@@ -1028,7 +1098,7 @@
                 # We are using BASH regexp matching feature.
                 package=${BASH_REMATCH[1]}
                 distros=${BASH_REMATCH[2]}
-                # In bash ${VAR,,} will lowecase VAR
+                # In bash ${VAR,,} will lowercase VAR
                 # Look for a match in the distro list
                 if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then
                     # If no match then skip this package
@@ -1055,13 +1125,19 @@
 # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection
 #   of the package to the distros listed.  The distro names are case insensitive.
 function get_packages {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local services=$@
-    local package_dir=$(_get_package_dir)
+    local package_dir
+    package_dir=$(_get_package_dir)
     local file_to_parse=""
     local service=""
 
+    if [ $# -ne 1 ]; then
+        die $LINENO "get_packages takes a single, comma-separated argument"
+    fi
+
     if [[ -z "$package_dir" ]]; then
         echo "No package directory supplied"
         return 1
@@ -1123,13 +1199,14 @@
 # The same metadata used in the main DevStack prerequisite files may be used
 # in these prerequisite files, see get_packages() for more info.
 function get_plugin_packages {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local files_to_parse=""
     local package_dir=""
     for plugin in ${DEVSTACK_PLUGINS//,/ }; do
         local package_dir="$(_get_package_dir ${GITDIR[$plugin]}/devstack/files)"
-        files_to_parse+="$package_dir/$plugin"
+        files_to_parse+=" $package_dir/$plugin"
     done
     echo "$(_parse_package_files $files_to_parse)"
     $xtrace
@@ -1148,15 +1225,7 @@
     fi
 
     if is_ubuntu; then
-        local xtrace=$(set +o | grep xtrace)
-        set +o xtrace
-        if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then
-            # if there are transient errors pulling the updates, that's fine.
-            # It may be secondary repositories that we don't really care about.
-            apt_get update  || /bin/true
-            REPOS_UPDATED=True
-        fi
-        $xtrace
+        apt_get_update
     fi
 }
 
@@ -1279,10 +1348,11 @@
     exec 3>&-
     exec 6>&-
 
-    local real_logfile="${LOGDIR}/${service}.log.${CURRENT_LOG_TIME}"
+    local logfile="${service}.log.${CURRENT_LOG_TIME}"
+    local real_logfile="${LOGDIR}/${logfile}"
     if [[ -n ${LOGDIR} ]]; then
         exec 1>&"$real_logfile" 2>&1
-        ln -sf "$real_logfile" ${LOGDIR}/${service}.log
+        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
         if [[ -n ${SCREEN_LOGDIR} ]]; then
             # Drop the backward-compat symlink
             ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log
@@ -1336,12 +1406,14 @@
 # If the command includes shell metacharacters (;<>*) it must be run using a shell
 # If an optional group is provided sg will be used to run the
 # command as that group.
+# Uses globals ``USE_SCREEN``
 # run_process service "command-line" [group]
 function run_process {
     local service=$1
     local command="$2"
     local group=$3
 
+    time_start "run_process"
     if is_service_enabled $service; then
         if [[ "$USE_SCREEN" = "True" ]]; then
             screen_process "$service" "$command" "$group"
@@ -1350,11 +1422,12 @@
             _run_process "$service" "$command" "$group" &
         fi
     fi
+    time_stop "run_process"
 }
 
 # Helper to launch a process in a named screen
 # Uses globals ``CURRENT_LOG_TIME``, ``LOGDIR``, ``SCREEN_LOGDIR``, ``SCREEN_NAME``,
-# ``SERVICE_DIR``, ``USE_SCREEN``
+# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING``
 # screen_process name "command-line" [group]
 # Run a command in a shell in a screen window, if an optional group
 # is provided, use sg to set the group of the command.
@@ -1365,18 +1438,22 @@
 
     SCREEN_NAME=${SCREEN_NAME:-stack}
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
 
     screen -S $SCREEN_NAME -X screen -t $name
 
-    local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}"
+    local logfile="${name}.log.${CURRENT_LOG_TIME}"
+    local real_logfile="${LOGDIR}/${logfile}"
     echo "LOGDIR: $LOGDIR"
     echo "SCREEN_LOGDIR: $SCREEN_LOGDIR"
     echo "log: $real_logfile"
     if [[ -n ${LOGDIR} ]]; then
-        screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
-        screen -S $SCREEN_NAME -p $name -X log on
-        ln -sf "$real_logfile" ${LOGDIR}/${name}.log
+        if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
+            screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile"
+            screen -S $SCREEN_NAME -p $name -X log on
+        fi
+        # If logging isn't active then avoid a broken symlink
+        touch "$real_logfile"
+        bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log"
         if [[ -n ${SCREEN_LOGDIR} ]]; then
             # Drop the backward-compat symlink
             ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log
@@ -1414,7 +1491,7 @@
 }
 
 # Screen rc file builder
-# Uses globals ``SCREEN_NAME``, ``SCREENRC``
+# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING``
 # screen_rc service "command-line"
 function screen_rc {
     SCREEN_NAME=${SCREEN_NAME:-stack}
@@ -1434,7 +1511,7 @@
         echo "screen -t $1 bash" >> $SCREENRC
         echo "stuff \"$2$NL\"" >> $SCREENRC
 
-        if [[ -n ${LOGDIR} ]]; then
+        if [[ -n ${LOGDIR} ]] && [[ "$SCREEN_IS_LOGGING" == "True" ]]; then
             echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC
             echo "log on" >>$SCREENRC
         fi
@@ -1445,14 +1522,13 @@
 # If a PID is available use it, kill the whole process group via TERM
 # If screen is being used kill the screen window; this will catch processes
 # that did not leave a PID behind
-# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN``
+# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``
 # screen_stop_service service
 function screen_stop_service {
     local service=$1
 
     SCREEN_NAME=${SCREEN_NAME:-stack}
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
 
     if is_service_enabled $service; then
         # Clean up the screen window
@@ -1470,12 +1546,27 @@
     local service=$1
 
     SERVICE_DIR=${SERVICE_DIR:-${DEST}/status}
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
 
     if is_service_enabled $service; then
         # Kill via pid if we have one available
         if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then
             pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid)
+            # oslo.service tends to stop shutting down reliably
+            # between releases, because someone decides it is dying
+            # too early due to some in-flight work. This is a
+            # tension. It happens often enough that we just account
+            # for it in devstack and assume shutdown doesn't work.
+            #
+            # Set OSLO_SERVICE_WORKS=True to skip this block
+            if [[ -z "$OSLO_SERVICE_WORKS" ]]; then
+                # TODO(danms): Remove this double-kill when we have
+                # this fixed in all services:
+                # https://bugs.launchpad.net/oslo-incubator/+bug/1446583
+                sleep 1
+                # /bin/true because pkill on a non-existent process returns an error
+                pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true
+            fi
             rm $SERVICE_DIR/$SCREEN_NAME/$service.pid
         fi
         if [[ "$USE_SCREEN" = "True" ]]; then
@@ -1516,11 +1607,11 @@
 }
 
 # Tail a log file in a screen if USE_SCREEN is true.
+# Uses global ``USE_SCREEN``
 function tail_log {
     local name=$1
     local logfile=$2
 
-    USE_SCREEN=$(trueorfalse True USE_SCREEN)
     if [[ "$USE_SCREEN" = "True" ]]; then
         screen_process "$name" "sudo tail -f $logfile"
     fi
@@ -1681,7 +1772,7 @@
         if [[ -f $dir/devstack/override-defaults ]]; then
             # be really verbose that an override is happening, as it
             # may not be obvious if things fail later.
-            echo "$plugin has overriden the following defaults"
+            echo "$plugin has overridden the following defaults"
             cat $dir/devstack/override-defaults
             source $dir/devstack/override-defaults
         fi
@@ -1710,13 +1801,27 @@
     local mode=$1
     local phase=$2
     if [[ -d $TOP_DIR/extras.d ]]; then
-        for i in $TOP_DIR/extras.d/*.sh; do
-            [[ -r $i ]] && source $i $mode $phase
+        local extra_plugin_file_name
+        for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do
+            # NOTE(sdague): only process extras.d for the explicitly
+            # whitelisted elements in tree. We want these to move out
+            # over time as well, but they are in tree, so we need to
+            # manage that.
+            local exceptions="60-ceph.sh 80-tempest.sh"
+            local extra
+            extra=$(basename $extra_plugin_file_name)
+            if [[ ! ( $exceptions =~ "$extra" ) ]]; then
+                warn "use of extras.d is no longer supported"
+                warn "processing of project $extra is skipped"
+            else
+                [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase
+            fi
         done
     fi
     # the source phase corresponds to settings loading in plugins
     if [[ "$mode" == "source" ]]; then
         load_plugin_settings
+        verify_disabled_services
     elif [[ "$mode" == "override_defaults" ]]; then
         plugin_override_defaults
     else
@@ -1731,11 +1836,17 @@
 # remove extra commas from the input string (e.g. ``ENABLED_SERVICES``)
 # _cleanup_service_list service-list
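 #
 # Example (illustrative): _cleanup_service_list ",n-api,,n-cpu," echoes
 # "n-api,n-cpu".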
 function _cleanup_service_list {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     echo "$1" | sed -e '
         s/,,/,/g;
         s/^,//;
         s/,$//
     '
+
+    $xtrace
 }
 
 # disable_all_services() removes all current services
@@ -1753,6 +1864,10 @@
 # Uses global ``ENABLED_SERVICES``
 # disable_negated_services
 function disable_negated_services {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local to_remove=""
     local remaining=""
     local service
@@ -1770,27 +1885,36 @@
     # go through the service list.  if this service appears in the "to
     # be removed" list, drop it
     ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove")
+
+    $xtrace
 }
 
-# disable_service() removes the services passed as argument to the
-# ``ENABLED_SERVICES`` list, if they are present.
+# disable_service() prepares the services passed as argument to be
+# removed from the ``ENABLED_SERVICES`` list, if they are present.
 #
 # For example:
 #   disable_service rabbit
 #
-# This function does not know about the special cases
-# for nova, glance, and neutron built into is_service_enabled().
-# Uses global ``ENABLED_SERVICES``
+# Uses globals ``DISABLED_SERVICES``, ``ENABLED_SERVICES``
 # disable_service service [service ...]
 function disable_service {
-    local tmpsvcs=",${ENABLED_SERVICES},"
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local disabled_svcs="${DISABLED_SERVICES}"
+    local enabled_svcs=",${ENABLED_SERVICES},"
     local service
     for service in $@; do
+        disabled_svcs+=",$service"
         if is_service_enabled $service; then
-            tmpsvcs=${tmpsvcs//,$service,/,}
+            enabled_svcs=${enabled_svcs//,$service,/,}
         fi
     done
-    ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
+    DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs")
+    ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs")
+
+    $xtrace
 }
 
 # enable_service() adds the services passed as argument to the
@@ -1804,15 +1928,25 @@
 # Uses global ``ENABLED_SERVICES``
 # enable_service service [service ...]
 function enable_service {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local tmpsvcs="${ENABLED_SERVICES}"
     local service
     for service in $@; do
+        if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then
+            warn $LINENO "Attempt to enable_service ${service} when it has been disabled"
+            continue
+        fi
         if ! is_service_enabled $service; then
             tmpsvcs+=",$service"
         fi
     done
     ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs")
     disable_negated_services
+
+    $xtrace
 }
 
 # is_service_enabled() checks if the service(s) specified as arguments are
@@ -1838,8 +1972,10 @@
 # Uses global ``ENABLED_SERVICES``
 # is_service_enabled service [service ...]
 function is_service_enabled {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
+
     local enabled=1
     local services=$@
     local service
@@ -1859,12 +1995,12 @@
         [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0
         [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0
         [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0
-        [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0
         [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0
         [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0
         [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0
         [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0
     done
+
     $xtrace
     return $enabled
 }
@@ -1872,6 +2008,10 @@
 # remove specified list from the input string
 # remove_disabled_services service-list remove-list
 function remove_disabled_services {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local service_list=$1
     local remove_list=$2
     local service
@@ -1890,6 +2030,9 @@
             enabled="${enabled},$service"
         fi
     done
+
+    $xtrace
+
     _cleanup_service_list "$enabled"
 }
 
@@ -1910,6 +2053,18 @@
     return 0
 }
 
+# Make sure that nothing has manipulated ENABLED_SERVICES in a way
+# that conflicts with prior calls to disable_service.
+# Uses global ``ENABLED_SERVICES``
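+#
+# For example (illustrative), this sequence would trigger the die below
+# when the source phase runs verify_disabled_services:
+#   disable_service rabbit
+#   ENABLED_SERVICES+=",rabbit"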
+function verify_disabled_services {
+    local service
+    for service in ${ENABLED_SERVICES//,/ }; do
+        if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then
+            die $LINENO "ENABLED_SERVICES directly modified to overcome 'disable_service ${service}'"
+        fi
+    done
+}
+
 
 # System Functions
 # ================
@@ -1917,7 +2072,8 @@
 # Only run the command if the target file (the last arg) is not on an
 # NFS filesystem.
 function _safe_permission_operation {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local args=( $@ )
     local last
@@ -1953,8 +2109,10 @@
     local ip=$1
     local range=$2
     local masklen=${range#*/}
-    local network=$(maskip ${range%/*} $(cidr2netmask $masklen))
-    local subnet=$(maskip $ip $(cidr2netmask $masklen))
+    local network
+    network=$(maskip ${range%/*} $(cidr2netmask $masklen))
+    local subnet
+    subnet=$(maskip $ip $(cidr2netmask $masklen))
     [[ $network == $subnet ]]
 }
 
@@ -2006,7 +2164,8 @@
 
 # Returns true if the directory is on a filesystem mounted via NFS.
 function is_nfs_directory {
-    local mount_type=`stat -f -L -c %T $1`
+    local mount_type
+    mount_type=`stat -f -L -c %T $1`
     test "$mount_type" == "nfs"
 }
 
@@ -2017,13 +2176,15 @@
     local ip=$1
     local mask=$2
     local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}"
-    local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
+    local subnet
+    subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.}))
     echo $subnet
 }
 
 # Return the current python as "python<major>.<minor>"
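 #
 # e.g. (illustrative) echoes "python2.7" under a 2.7 interpreter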
 function python_version {
-    local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
+    local python_version
+    python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])')
     echo "python${python_version}"
 }
 
@@ -2077,14 +2238,87 @@
     local until=${3:-10}
     local sleep=${4:-0.5}
 
+    time_start "test_with_retry"
     if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then
         die $LINENO "$failmsg"
     fi
+    time_stop "test_with_retry"
 }
 
+# Timing infrastructure - figure out where large blocks of time are
+# used in DevStack
+#
+# The timing infrastructure for DevStack is about collecting buckets
+# of time that are spent in some subtask. For instance, that might be
+# 'apt', 'pip', 'osc', or even database migrations. We do this with a
+# pair of functions: time_start / time_stop.
+#
+# Each takes a single parameter, $name, which specifies the name of
+# the bucket the time is accounted against. The time_totals function
+# prints out the results.
+#
+# Resolution is only in whole seconds, so should be used for long
+# running activities.
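+#
+# A minimal usage sketch ("my-task" is a hypothetical bucket name):
+#
+#   time_start "my-task"
+#   some_long_running_command
+#   time_stop "my-task"
+#   # ... later, print the accumulated seconds per bucket:
+#   time_totals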
+
+declare -A TOTAL_TIME
+declare -A START_TIME
+
+# time_start $name
+#
+# starts the clock for a timer by name. Errors if that clock is
+# already started.
+function time_start {
+    local name=$1
+    local start_time=${START_TIME[$name]}
+    if [[ -n "$start_time" ]]; then
+        die $LINENO "Trying to start the clock on $name, but it's already been started"
+    fi
+    START_TIME[$name]=$(date +%s)
+}
+
+# time_stop $name
+#
+# stops the clock for a timer by name, and accumulate that time in the
+# global counter for that name. Errors if that clock had not
+# previously been started.
+function time_stop {
+    local name
+    local end_time
+    local elapsed_time
+    local total
+    local start_time
+
+    name=$1
+    start_time=${START_TIME[$name]}
+
+    if [[ -z "$start_time" ]]; then
+        die $LINENO "Trying to stop the clock on $name, but it was never started"
+    fi
+    end_time=$(date +%s)
+    elapsed_time=$(($end_time - $start_time))
+    total=${TOTAL_TIME[$name]:-0}
+    # reset the clock so we can start it in the future
+    START_TIME[$name]=""
+    TOTAL_TIME[$name]=$(($total + $elapsed_time))
+}
+
+# time_totals
+#
+# prints out the total time spent in each bucket
+function time_totals {
+    echo
+    echo "========================"
+    echo "DevStack Components Timed"
+    echo "========================"
+    echo
+    for t in ${!TOTAL_TIME[*]}; do
+        local v=${TOTAL_TIME[$t]}
+        echo "$t - $v secs"
+    done
+}
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_FUNCTIONS_COMMON
 
 # Local variables:
 # mode: shell-script
diff --git a/inc/ini-config b/inc/ini-config
index 58386e2..e99b088 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -17,7 +17,8 @@
 # Append a new option in an ini file without replacing the old value
 # iniadd [-sudo] config-file section option value1 value2 value3 ...
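 #
 # Example (illustrative; the file, section, and values are hypothetical):
 #   iniadd /etc/foo/foo.conf DEFAULT enabled_backends lvm ceph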
 function iniadd {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -37,7 +38,8 @@
 # Comment an option in an INI file
 # inicomment [-sudo] config-file section option
 function inicomment {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -55,7 +57,8 @@
 # Get an option from an INI file
 # iniget config-file section option
 function iniget {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local file=$1
     local section=$2
@@ -70,7 +73,8 @@
 # Get a multiple line option from an INI file
 # iniget_multiline config-file section option
 function iniget_multiline {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local file=$1
     local section=$2
@@ -85,7 +89,8 @@
 # Determine if the given option is present in the INI file
 # ini_has_option config-file section option
 function ini_has_option {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local file=$1
     local section=$2
@@ -107,7 +112,8 @@
 #
 # iniadd_literal [-sudo] config-file section option value
 function iniadd_literal {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -135,7 +141,8 @@
 # Remove an option from an INI file
 # inidelete [-sudo] config-file section option
 function inidelete {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -161,7 +168,8 @@
 # iniset [-sudo] config-file section option value
 #  - if the file does not exist, it is created
 function iniset {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -188,7 +196,8 @@
 $option = $value
 " "$file"
     else
-        local sep=$(echo -ne "\x01")
+        local sep
+        sep=$(echo -ne "\x01")
         # Replace it
         $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file"
     fi
@@ -196,9 +205,10 @@
 }
 
 # Set a multiple line option in an INI file
-# iniset_multiline [-sudo] config-file section option value1 value2 valu3 ...
+# iniset_multiline [-sudo] config-file section option value1 value2 value3 ...
 function iniset_multiline {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -236,7 +246,8 @@
 # Uncomment an option in an INI file
 # iniuncomment config-file section option
 function iniuncomment {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
     if [ $1 == "-sudo" ]; then
@@ -250,6 +261,18 @@
     $xtrace
 }
 
+# Get list of sections from an INI file
+# iniget_sections config-file
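+#
+# Example (illustrative): for a file with [DEFAULT] and [database]
+# sections, "iniget_sections /etc/foo/foo.conf" echoes "DEFAULT database".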
+function iniget_sections {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local file=$1
+
+    echo $(sed -ne "s/^\[\(.*\)\]/\1/p" "$file")
+    $xtrace
+}
+
 # Restore xtrace
 $INC_CONF_TRACE
 
diff --git a/inc/meta-config b/inc/meta-config
index e5f902d..b6fe437 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -20,7 +20,7 @@
 # file-name is the destination of the config file
 
 # Save trace setting
-INC_META_XTRACE=$(set +o | grep xtrace)
+_XTRACE_INC_META=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -89,9 +89,10 @@
     # note, configfile might be a variable (the iniset, etc. created
     # in the mega-awk below is "eval"ed too), so we just leave it
     # alone.
-    local real_configfile=$(eval echo $configfile)
+    local real_configfile
+    real_configfile=$(eval echo $configfile)
     if [ ! -f $real_configfile ]; then
-        touch $real_configfile
+        touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)"
     fi
 
     get_meta_section $file $matchgroup $configfile | \
@@ -177,8 +178,18 @@
     local configfile group
     for group in $matchgroups; do
         for configfile in $(get_meta_section_files $localfile $group); do
-            if [[ -d $(dirname $(eval "echo $configfile")) ]]; then
+            local realconfigfile
+            local dir
+
+            realconfigfile=$(eval "echo $configfile")
+            if [[ -z $realconfigfile ]]; then
+                die $LINENO "bogus config file specification: $configfile is undefined"
+            fi
+            dir=$(dirname $realconfigfile)
+            if [[ -d $dir ]]; then
                 merge_config_file $localfile $group $configfile
+            else
+                die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)"
             fi
         done
     done
@@ -186,7 +197,7 @@
 
 
 # Restore xtrace
-$INC_META_XTRACE
+$_XTRACE_INC_META
 
 # Local variables:
 # mode: shell-script
diff --git a/inc/python b/inc/python
index fd0d616..35bab6f 100644
--- a/inc/python
+++ b/inc/python
@@ -17,7 +17,7 @@
 
 # Global Config Variables
 
-# PROJECT_VENV contains the name of the virtual enviromnet for each
+# PROJECT_VENV contains the name of the virtual environment for each
 # project.  A null value installs to the system Python directories.
 declare -A PROJECT_VENV
 
@@ -28,17 +28,21 @@
 # Get the path to the pip command.
 # get_pip_command version
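 #
 # Example (illustrative): "get_pip_command 2.7" echoes the path to
 # pip2.7 (or pip2.7-python) and dies if neither is found.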
 function get_pip_command {
-    which pip || which pip-python
+    local version="$1"
+    # NOTE(dhellmann): I don't know if we actually get a pip3.4-python
+    # under any circumstances.
+    which pip${version} || which pip${version}-python
 
     if [ $? -ne 0 ]; then
-        die $LINENO "Unable to find pip; cannot continue"
+        die $LINENO "Unable to find pip${version}; cannot continue"
     fi
 }
 
-# Get the path to the direcotry where python executables are installed.
+# Get the path to the directory where python executables are installed.
 # get_python_exec_prefix
 function get_python_exec_prefix {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
@@ -60,16 +64,25 @@
 # pip_install_gr packagename
 function pip_install_gr {
     local name=$1
-    local clean_name=$(get_from_global_requirements $name)
+    local clean_name
+    clean_name=$(get_from_global_requirements $name)
     pip_install $clean_name
 }
 
+# Determine the python versions supported by a package
+function get_python_versions_for_package {
+    local name=$1
+    cd $name && python setup.py --classifiers \
+        | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' '
+}
+
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
 # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
 # pip_install package [package ...]
 function pip_install {
-    local xtrace=$(set +o | grep xtrace)
+    local xtrace result
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local upgrade=""
     local offline=${OFFLINE:-False}
@@ -78,6 +91,8 @@
         return
     fi
 
+    time_start "pip_install"
+
     PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE)
     if [[ "$PIP_UPGRADE" = "True" ]] ; then
         upgrade="--upgrade"
@@ -88,7 +103,7 @@
     fi
     if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
         # TRACK_DEPENDS=True installation creates a circular dependency when
-        # we attempt to install virtualenv into a virualenv, so we must global
+        # we attempt to install virtualenv into a virtualenv, so we must
         # perform that installation globally.
         source $DEST/.venv/bin/activate
         local cmd_pip=$DEST/.venv/bin/pip
@@ -98,8 +113,23 @@
             local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
             local sudo_pip="env"
         else
-            local cmd_pip=$(get_pip_command)
+            local cmd_pip
+            cmd_pip=$(get_pip_command $PYTHON2_VERSION)
             local sudo_pip="sudo -H"
+            if python3_enabled; then
+                # Look at the package classifiers to find the python
+                # versions supported, and if we find the version of
+                # python3 we've been told to use, use that instead of the
+                # default pip
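+                #
+                # For example (illustrative), a package whose setup.cfg
+                # carries the classifier
+                #   "Programming Language :: Python :: 3.4"
+                # yields "3.4" here, which matches PYTHON3_VERSION=3.4.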
+                local package_dir=${!#}
+                local python_versions
+                if [[ -d "$package_dir" ]]; then
+                    python_versions=$(get_python_versions_for_package $package_dir)
+                    if [[ $python_versions =~ $PYTHON3_VERSION ]]; then
+                        cmd_pip=$(get_pip_command $PYTHON3_VERSION)
+                    fi
+                fi
+            fi
         fi
     fi
 
@@ -107,7 +137,10 @@
     # Always apply constraints
     cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt"
 
-    local pip_version=$(python -c "import pip; \
+    # FIXME(dhellmann): Need to force multiple versions of pip for
+    # packages like setuptools?
+    local pip_version
+    pip_version=$(python -c "import pip; \
                        print(pip.__version__.split('.')[0])")
     if (( pip_version<6 )); then
         die $LINENO "Currently installed pip version ${pip_version} does not" \
@@ -122,10 +155,11 @@
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
         $cmd_pip $upgrade \
         $@
+    result=$?
 
     # Also install test requirements
     local test_req="${!#}/test-requirements.txt"
-    if [[ -e "$test_req" ]]; then
+    if [[ $result == 0 ]] && [[ -e "$test_req" ]]; then
         echo "Installing test-requirements for $test_req"
         $sudo_pip \
             http_proxy=${http_proxy:-} \
@@ -134,14 +168,19 @@
             PIP_FIND_LINKS=$PIP_FIND_LINKS \
             $cmd_pip $upgrade \
             -r $test_req
+        result=$?
     fi
+
+    time_stop "pip_install"
+    return $result
 }
 
 # get version of a package from global requirements file
 # get_from_global_requirements <package>
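 #
 # Example (illustrative output; the actual pin depends on the
 # requirements branch): "get_from_global_requirements pbr" might
 # echo "pbr>=1.6".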
 function get_from_global_requirements {
     local package=$1
-    local required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
+    local required_pkg
+    required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1)
     if [[ $required_pkg == ""  ]]; then
         die $LINENO "Can't find package $package in requirements"
     fi
@@ -189,7 +228,7 @@
     setup_install $dir
 }
 
-# setup a library by name in editiable mode. If we are trying to use
+# setup a library by name in editable mode. If we are trying to use
 # the library from git, we'll do a git based install, otherwise we'll
 # punt and the library should be installed by a requirements pull from
 # another project.
@@ -203,15 +242,31 @@
 
 # this should be used if you want to install globally, all libraries should
 # use this, especially *oslo* ones
+#
+# setup_install project_dir [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install <project_dir>[<extras>]"
 function setup_install {
     local project_dir=$1
-    setup_package_with_constraints_edit $project_dir
+    local extras=$2
+    _setup_package_with_constraints_edit $project_dir "" $extras
 }
 
 # this should be used for projects which run services, like all services
+#
+# setup_develop project_dir [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install -e <project_dir>[<extras>]"
 function setup_develop {
     local project_dir=$1
-    setup_package_with_constraints_edit $project_dir -e
+    local extras=$2
+    _setup_package_with_constraints_edit $project_dir -e $extras
 }
 
 # determine if a project as specified by directory is in
@@ -220,7 +275,8 @@
 # practical ways.
 function is_in_projects_txt {
     local project_dir=$1
-    local project_name=$(basename $project_dir)
+    local project_name
+    project_name=$(basename $project_dir)
     grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
 }
 
@@ -232,38 +288,80 @@
 # install this package we get the from source version.
 #
 # Uses globals ``REQUIREMENTS_DIR``
-# setup_develop directory
-function setup_package_with_constraints_edit {
+# _setup_package_with_constraints_edit project_dir flags [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# flags: pip CLI options/flags
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install <flags> <project_dir>[<extras>]"
+function _setup_package_with_constraints_edit {
     local project_dir=$1
     local flags=$2
+    local extras=$3
 
     if [ -n "$REQUIREMENTS_DIR" ]; then
         # Constrain this package to this project directory from here on out.
-        local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        local name
+        name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
         $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
             $REQUIREMENTS_DIR/upper-constraints.txt -- $name \
             "$flags file://$project_dir#egg=$name"
     fi
 
-    setup_package $project_dir $flags
+    setup_package $project_dir "$flags" $extras
 
 }
 
 # ``pip install -e`` the package, which processes the dependencies
 # using pip before running `setup.py develop`
+#
 # Uses globals ``STACK_USER``
-# setup_develop_no_requirements_update directory
+# setup_package project_dir [flags] [extras]
+# project_dir: directory of project repo (e.g., /opt/stack/keystone)
+# flags: pip CLI options/flags
+# extras: comma-separated list of optional dependencies to install
+#         (e.g., ldap,memcache).
+#         See http://docs.openstack.org/developer/pbr/#extra-requirements
+# The command is like "pip install <flags> <project_dir>[<extras>]"
 function setup_package {
     local project_dir=$1
     local flags=$2
+    local extras=$3
 
-    pip_install $flags $project_dir
+    # if the flags variable exists, and it doesn't look like a flag,
+    # assume it's actually the extras list.
+    if [[ -n "$flags" && -z "$extras" && ! "$flags" =~ ^-.* ]]; then
+        extras=$flags
+        flags=""
+    fi
+
+    if [[ ! -z "$extras" ]]; then
+        extras="[$extras]"
+    fi
+
+    pip_install $flags "$project_dir$extras"
     # ensure that further actions can do things like setup.py sdist
     if [[ "$flags" == "-e" ]]; then
         safe_chown -R $STACK_USER $1/*.egg-info
     fi
 }
 
+# Report whether python 3 should be used
+function python3_enabled {
+    if [[ $USE_PYTHON3 == "True" ]]; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+# Install python3 packages
+function install_python3 {
+    if is_ubuntu; then
+        apt_get install python3.4 python3.4-dev
+    fi
+}
 
 # Restore xtrace
 $INC_PY_TRACE
diff --git a/inc/rootwrap b/inc/rootwrap
index f91e557..2a6e4b6 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -22,14 +22,14 @@
     local line
 
     # This is pretty simplistic for now - assume only the first line is used
-    if [[ -r SUDO_SECURE_PATH_FILE ]]; then
+    if [[ -r $SUDO_SECURE_PATH_FILE ]]; then
         line=$(head -1 $SUDO_SECURE_PATH_FILE)
     else
         line="Defaults:$STACK_USER secure_path=/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin"
     fi
 
     # Only add ``dir`` if it is not already present
-    if [[ $line =~ $dir ]]; then
+    if [[ ! $line =~ $dir ]]; then
         echo "${line}:$dir" | sudo tee $SUDO_SECURE_PATH_FILE
         sudo chmod 400 $SUDO_SECURE_PATH_FILE
         sudo chown root:root $SUDO_SECURE_PATH_FILE
@@ -41,7 +41,8 @@
 # configure_rootwrap project
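 #
 # Example (illustrative): "configure_rootwrap nova" expects NOVA_BIN_DIR
 # and NOVA_DIR to already be set (as lib/nova does).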
 function configure_rootwrap {
     local project=$1
-    local project_uc=$(echo $1|tr a-z A-Z)
+    local project_uc
+    project_uc=$(echo $1|tr a-z A-Z)
     local bin_dir="${project_uc}_BIN_DIR"
     bin_dir="${!bin_dir}"
     local project_dir="${project_uc}_DIR"
@@ -60,7 +61,8 @@
     sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf
 
     # Set up the rootwrap sudoers
-    local tempfile=$(mktemp)
+    local tempfile
+    tempfile=$(mktemp)
     # Specify rootwrap.conf as first parameter to rootwrap
     rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *"
     echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile
diff --git a/lib/apache b/lib/apache
index a8e9bc5..2c84c7a 100644
--- a/lib/apache
+++ b/lib/apache
@@ -19,7 +19,7 @@
 # - restart_apache_server
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LIB_APACHE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Allow overriding the default Apache user and group, default to
@@ -72,11 +72,14 @@
 # various differences between Apache 2.2 and 2.4 that warrant special handling.
 function get_apache_version {
     if is_ubuntu; then
-        local version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
+        local version_str
+        version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/)
     elif is_fedora; then
-        local version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
+        local version_str
+        version_str=$(rpm -qa --queryformat '%{VERSION}' httpd)
     elif is_suse; then
-        local version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
+        local version_str
+        version_str=$(rpm -qa --queryformat '%{VERSION}' apache2)
     else
         exit_distro_not_supported "cannot determine apache version"
     fi
@@ -115,7 +118,8 @@
 function apache_site_config_for {
     local site=$@
     if is_ubuntu; then
-        local apache_version=$(get_apache_version)
+        local apache_version
+        apache_version=$(get_apache_version)
         if [[ "$apache_version" == "2.2" ]]; then
             # Ubuntu 12.04 - Apache 2.2
             echo $APACHE_CONF_DIR/${site}
@@ -181,13 +185,15 @@
     # Apache can be slow to stop, doing an explicit stop, sleep, start helps
     # to mitigate issues where apache will claim a port it's listening on is
     # still in use and fail to start.
+    time_start "restart_apache_server"
     stop_service $APACHE_NAME
     sleep 3
     start_service $APACHE_NAME
+    time_stop "restart_apache_server"
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LIB_APACHE
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/ceph b/lib/ceph
index 8e34aa4..3e0839a 100644
--- a/lib/ceph
+++ b/lib/ceph
@@ -18,7 +18,7 @@
 # - cleanup_ceph
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LIB_CEPH=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -83,7 +83,8 @@
 # ------------
 
 function get_ceph_version {
-    local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
+    local ceph_version_str
+    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
     echo $ceph_version_str
 }
 
@@ -106,7 +107,8 @@
 # undefine_virsh_secret() - Undefine Cinder key secret from libvirt
 function undefine_virsh_secret {
     if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
+        local virsh_uuid
+        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
         sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
     fi
 }
@@ -114,7 +116,7 @@
 
 # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
 function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f21|f22) ]]; then
+    if [[ ! ${DISTRO} =~ (trusty|f22|f23) ]]; then
         echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
         if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
             die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
@@ -219,7 +221,8 @@
     done
 
     # The 'data' and 'metadata' pools were removed in the Giant release, so we apply different commands depending on the version
-    local ceph_version=$(get_ceph_version)
+    local ceph_version
+    ceph_version=$(get_ceph_version)
     # change pool replica size according to the CEPH_REPLICAS set by the user
     if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
         sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
@@ -372,7 +375,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LIB_CEPH
 
 ## Local variables:
 ## mode: shell-script
diff --git a/lib/cinder b/lib/cinder
index 1014411..5bd940b 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -20,7 +20,7 @@
 # - cleanup_cinder
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -93,7 +93,7 @@
     if [[ $CINDER_SECURE_DELETE == "False" ]]; then
         CINDER_VOLUME_CLEAR_DEFAULT="none"
     fi
-    DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n"
+    deprecated "Configure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE."
 fi
 CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
 CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
@@ -150,7 +150,8 @@
     # ensure the volume group is cleared up because failures might
     # leave dead volumes in the group
     if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
-        local targets=$(sudo tgtadm --op show --mode target)
+        local targets
+        targets=$(sudo tgtadm --op show --mode target)
         if [ $? -ne 0 ]; then
             # If tgt driver isn't running this won't work obviously
             # So check the response and restart if need be
@@ -198,7 +199,8 @@
 
 # _cinder_config_apache_wsgi() - Set WSGI config files
 function _cinder_config_apache_wsgi {
-    local cinder_apache_conf=$(apache_site_config_for osapi-volume)
+    local cinder_apache_conf
+    cinder_apache_conf=$(apache_site_config_for osapi-volume)
     local cinder_ssl=""
     local cinder_certfile=""
     local cinder_keyfile=""
@@ -268,10 +270,6 @@
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
     iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
-    # NOTE(thingee): Cinder V1 API is deprecated and defaults to off as of
-    # Juno. Keep it enabled so we can continue testing while it's still
-    # supported.
-    iniset $CINDER_CONF DEFAULT enable_v1_api true
 
     iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME"
 
@@ -297,7 +295,7 @@
     fi
 
     if is_service_enabled swift; then
-        iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_"
+        iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_"
     fi
 
     if is_service_enabled ceilometer; then
@@ -307,6 +305,8 @@
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
         iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
+
+        iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
     fi
 
     if [ "$SYSLOG" != "False" ]; then
@@ -315,9 +315,7 @@
 
     iniset_rpc_backend cinder $CINDER_CONF
 
-    if [[ "$CINDER_VOLUME_CLEAR" == "none" ]] || [[ "$CINDER_VOLUME_CLEAR" == "zero" ]] || [[ "$CINDER_VOLUME_CLEAR" == "shred" ]]; then
-        iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR
-    fi
+    iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR
 
     # Format logging
     if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
@@ -355,7 +353,7 @@
     iniset $CINDER_CONF DEFAULT os_privileged_user_name nova
     iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD"
     iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_TENANT_NAME"
-
+    iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 }
 
 # create_cinder_accounts() - Set up common required cinder accounts
@@ -550,9 +548,7 @@
         local be be_name
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
             be_name=${be##*:}
-            # FIXME(jamielennox): Remove --os-volume-api-version pinning when
-            # osc supports volume type create on v2 api. bug #1475060
-            openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name}
+            openstack volume type create --property volume_backend_name="${be_name}" ${be_name}
         done
     fi
 }
@@ -567,7 +563,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_CINDER
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 7e9d2d3..9bff5be 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -22,7 +22,7 @@
 
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_CEPH=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -62,7 +62,7 @@
                 sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
             fi
         fi
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
         iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
@@ -76,7 +76,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_CEPH
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/glusterfs b/lib/cinder_backends/glusterfs
index 00c62e0..4e34f8e 100644
--- a/lib/cinder_backends/glusterfs
+++ b/lib/cinder_backends/glusterfs
@@ -19,7 +19,7 @@
 # configure_cinder_backend_glusterfs - Configure Cinder for GlusterFS backends
 
 # Save trace setting
-GLUSTERFS_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -41,7 +41,7 @@
 
 
 # Restore xtrace
-$GLUSTERFS_XTRACE
+$_XTRACE_CINDER_GLUSTERFS
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index 411b82c..d927f9c 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -22,7 +22,7 @@
 
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_LVM=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -68,7 +68,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_LVM
 
 # mode: shell-script
 # End:
diff --git a/lib/cinder_backends/netapp_iscsi b/lib/cinder_backends/netapp_iscsi
index be9442e..5cce30a 100644
--- a/lib/cinder_backends/netapp_iscsi
+++ b/lib/cinder_backends/netapp_iscsi
@@ -20,7 +20,7 @@
 # configure_cinder_backend_netapp_iscsi - configure iSCSI
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -59,7 +59,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_NETAPP
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/netapp_nfs b/lib/cinder_backends/netapp_nfs
index dc919ad..7ba36d2 100644
--- a/lib/cinder_backends/netapp_nfs
+++ b/lib/cinder_backends/netapp_nfs
@@ -20,7 +20,7 @@
 # configure_cinder_backend_netapp_nfs - configure NFS
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -70,7 +70,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_NETAPP
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs
index fc51b2b..89a37a1 100644
--- a/lib/cinder_backends/nfs
+++ b/lib/cinder_backends/nfs
@@ -19,7 +19,7 @@
 # configure_cinder_backend_nfs - Configure Cinder for NFS backends
 
 # Save trace setting
-NFS_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_NFS=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -38,7 +38,7 @@
 
 
 # Restore xtrace
-$NFS_XTRACE
+$_XTRACE_CINDER_NFS
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/solidfire b/lib/cinder_backends/solidfire
index 7cc70fc..16bc527 100644
--- a/lib/cinder_backends/solidfire
+++ b/lib/cinder_backends/solidfire
@@ -17,7 +17,7 @@
 # configure_cinder_driver - make configuration changes, including those to other services
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_SOLIDFIRE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -42,7 +42,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_SOLIDFIRE
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/vmdk b/lib/cinder_backends/vmdk
index d5b9453..3a6a5cf 100644
--- a/lib/cinder_backends/vmdk
+++ b/lib/cinder_backends/vmdk
@@ -15,7 +15,7 @@
 # configure_cinder_backend_vmdk - Configure Cinder for VMware vmdk backends
 
 # Save trace setting
-VMDK_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_VMDK=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -40,7 +40,7 @@
 
 
 # Restore xtrace
-$VMDK_XTRACE
+$_XTRACE_CINDER_VMDK
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv
index 6eadaae..e8b5da0 100644
--- a/lib/cinder_backends/xiv
+++ b/lib/cinder_backends/xiv
@@ -42,7 +42,7 @@
 # configure_cinder_backend_xiv - Configure Cinder for xiv backends
 
 # Save trace setting
-XIV_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_XIV=$(set +o | grep xtrace)
 set +o xtrace
 
 # Defaults
@@ -79,7 +79,7 @@
 }
 
 # Restore xtrace
-$XIV_XTRACE
+$_XTRACE_CINDER_XIV
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS
index f730695..92135e7 100644
--- a/lib/cinder_plugins/XenAPINFS
+++ b/lib/cinder_plugins/XenAPINFS
@@ -15,7 +15,7 @@
 # configure_cinder_driver - make configuration changes, including those to other services
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -39,7 +39,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_XENAPINFS
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs
index 35ceb27..329dd6c 100644
--- a/lib/cinder_plugins/glusterfs
+++ b/lib/cinder_plugins/glusterfs
@@ -15,7 +15,7 @@
 # configure_cinder_driver - make configuration changes, including those to other services
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -45,7 +45,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_GLUSTERFS
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs
index 83b3993..6e4ffe0 100644
--- a/lib/cinder_plugins/nfs
+++ b/lib/cinder_plugins/nfs
@@ -15,7 +15,7 @@
 # configure_cinder_driver - make configuration changes, including those to other services
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_NFS=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -36,7 +36,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_NFS
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog
index ca343f7..558de46 100644
--- a/lib/cinder_plugins/sheepdog
+++ b/lib/cinder_plugins/sheepdog
@@ -15,7 +15,7 @@
 # configure_cinder_driver - make configuration changes, including those to other services
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -34,7 +34,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_SHEEPDOG
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere
index f14ddf0..1b28ffe 100644
--- a/lib/cinder_plugins/vsphere
+++ b/lib/cinder_plugins/vsphere
@@ -15,7 +15,7 @@
 # configure_cinder_driver - make configuration changes, including those to other services
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_CINDER_VSPHERE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -37,7 +37,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_CINDER_VSPHERE
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/database b/lib/database
index 13740b9..0d72052 100644
--- a/lib/database
+++ b/lib/database
@@ -20,7 +20,7 @@
 # and call register_database $DATABASE_TYPE
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LIB_DB=$(set +o | grep xtrace)
 set +o xtrace
 
 DATABASE_BACKENDS=""
@@ -137,7 +137,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LIB_DB
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 510da75..f6cc922 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -8,7 +8,7 @@
 # - DATABASE_{HOST,USER,PASSWORD} must be defined
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_DB_MYSQL=$(set +o | grep xtrace)
 set +o xtrace
 
 MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL}
@@ -143,7 +143,7 @@
 [client]
 user=$DATABASE_USER
 password=$DATABASE_PASSWORD
-host=$DATABASE_HOST
+host=$MYSQL_HOST
 EOF
         chmod 0600 $HOME/.my.cnf
     fi
@@ -179,7 +179,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_DB_MYSQL
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 261574f..204c257 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -8,7 +8,7 @@
 # - DATABASE_{HOST,USER,PASSWORD} must be defined
 
 # Save trace setting
-PG_XTRACE=$(set +o | grep xtrace)
+_XTRACE_PG=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -122,7 +122,7 @@
 
 
 # Restore xtrace
-$PG_XTRACE
+$_XTRACE_PG
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/dlm b/lib/dlm
new file mode 100644
index 0000000..74eb67e
--- /dev/null
+++ b/lib/dlm
@@ -0,0 +1,108 @@
+#!/bin/bash
+#
+# lib/dlm
+#
+# Functions to control the installation and configuration of software
+# that provides a dlm (and possibly other functions). The default is
+# **zookeeper**, and is going to be the only backend supported in the
+# devstack tree.
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - is_dlm_enabled
+# - install_dlm
+# - configure_dlm
+# - cleanup_dlm
+
+# Save trace setting
+_XTRACE_DLM=$(set +o | grep xtrace)
+set +o xtrace
+
+
+# Defaults
+# --------
+
+# <define global variables here that belong to this project>
+
+# Set up default directories
+ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper
+ZOOKEEPER_CONF_DIR=/etc/zookeeper
+
+
+# Entry Points
+# ------------
+#
+# NOTE(sdague): it is expected that when someone wants to implement
+# another one of these out of tree, they'll implement the following
+# functions:
+#
+# - dlm_backend
+# - install_dlm
+# - configure_dlm
+# - cleanup_dlm
+
+# This should be declared in the settings file of any plugin or
+# service that needs to have a dlm in their environment.
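+#
+# Example (illustrative, from a plugin's settings file):
+#   use_dlm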
+function use_dlm {
+    enable_service $(dlm_backend)
+}
+
+# A function to return the name of the backend in question; some users
+# are going to need to know this.
+function dlm_backend {
+    echo "zookeeper"
+}
+
+# Test if a dlm is enabled (defaults to a zookeeper specific check)
+function is_dlm_enabled {
+    [[ ,${ENABLED_SERVICES}, =~ ,"$(dlm_backend)", ]] && return 0
+    return 1
+}
+
+# cleanup_dlm() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_dlm {
+    # NOTE(sdague): we don't check for is_enabled here because we
+    # should just delete this regardless. Sometimes users update
+    # their service list before they run cleanup.
+    sudo rm -rf $ZOOKEEPER_DATA_DIR
+}
+
+# configure_dlm() - Set config files, create data dirs, etc
+function configure_dlm {
+    if is_dlm_enabled; then
+        sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR
+        sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg
+        # clean up from previous (possibly aborted) runs
+        # create required data files
+        sudo rm -rf $ZOOKEEPER_DATA_DIR
+        sudo mkdir -p $ZOOKEEPER_DATA_DIR
+        # restart after configuration; there is no reason to make this
+        # another step, because having data files that don't match the
+        # running zookeeper is just going to cause tears.
+        restart_service zookeeper
+    fi
+}
+
+# install_dlm() - Collect source and prepare
+function install_dlm {
+    if is_dlm_enabled; then
+        if is_ubuntu; then
+            install_package zookeeperd
+        else
+            die $LINENO "Don't know how to install zookeeper on this platform"
+        fi
+    fi
+}
+
+# Restore xtrace
+$_XTRACE_DLM
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/dstat b/lib/dstat
index f11bfa5..b705948 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -13,14 +13,13 @@
 # - stop_dstat
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_DSTAT=$(set +o | grep xtrace)
 set +o xtrace
 
 # start_dstat() - Start running processes, including screen
 function start_dstat {
     # A better kind of sysstat, with the top process per time slice
-    DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv"
-    run_process dstat "dstat $DSTAT_OPTS"
+    run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR"
 
     # To enable peakmem_tracker add:
     #    enable_service peakmem_tracker
@@ -35,4 +34,4 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_DSTAT
diff --git a/lib/glance b/lib/glance
index 7be3a84..19e7937 100644
--- a/lib/glance
+++ b/lib/glance
@@ -21,7 +21,7 @@
 # - cleanup_glance
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_GLANCE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -106,7 +106,8 @@
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
     inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file
-    local dburl=`database_connection_url glance`
+    local dburl
+    dburl=`database_connection_url glance`
     iniset $GLANCE_REGISTRY_CONF database connection $dburl
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS"
@@ -114,6 +115,7 @@
     configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
     iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging
     iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
+    iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
     cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF
     iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -148,6 +150,7 @@
         iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF
         iniset $GLANCE_API_CONF glance_store default_swift_reference ref1
         iniset $GLANCE_API_CONF glance_store stores "file, http, swift"
+        iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
         iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_TENANT_NAME:glance-swift
         iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
@@ -166,6 +169,9 @@
         iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
         iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT
         iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT
+
+        iniset $GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
+        iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
     fi
 
     # Register SSL certificates if provided
@@ -265,7 +271,8 @@
         # required for swift access
         if is_service_enabled s-proxy; then
 
-            local glance_swift_user=$(get_or_create_user "glance-swift" \
+            local glance_swift_user
+            glance_swift_user=$(get_or_create_user "glance-swift" \
                 "$SERVICE_PASSWORD" "default" "glance-swift@example.com")
             get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME
         fi
@@ -401,7 +408,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_GLANCE
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/heat b/lib/heat
index 3e6975a..df44b76 100644
--- a/lib/heat
+++ b/lib/heat
@@ -16,13 +16,14 @@
 # - install_heat
 # - configure_heatclient
 # - configure_heat
+# - _config_heat_apache_wsgi
 # - init_heat
 # - start_heat
 # - stop_heat
 # - cleanup_heat
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_HEAT=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -32,6 +33,9 @@
 # set up default directories
 GITDIR["python-heatclient"]=$DEST/python-heatclient
 
+# Toggle for deploying Heat-API under HTTPD + mod_wsgi
+HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False}
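+# For example, to exercise this path set the toggle in local.conf:
+#   HEAT_USE_MOD_WSGI=True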
+
 HEAT_DIR=$DEST/heat
 HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
 HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
@@ -52,6 +56,10 @@
 HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
 HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
 HEAT_API_PORT=${HEAT_API_PORT:-8004}
+HEAT_SERVICE_USER=${HEAT_SERVICE_USER:-heat}
+HEAT_TRUSTEE_USER=${HEAT_TRUSTEE_USER:-$HEAT_SERVICE_USER}
+HEAT_TRUSTEE_PASSWORD=${HEAT_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD}
+HEAT_TRUSTEE_DOMAIN=${HEAT_TRUSTEE_DOMAIN:-default}
 
 # Support entry points installation of console scripts
 HEAT_BIN_DIR=$(get_python_exec_prefix)
@@ -59,12 +67,20 @@
 # other default options
 if [[ "$HEAT_STANDALONE" = "True" ]]; then
     # for standalone, use defaults which require no service user
-    HEAT_STACK_DOMAIN=`trueorfalse False $HEAT_STACK_DOMAIN`
+    HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN)
     HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password}
+    if [[ ${HEAT_DEFERRED_AUTH} != "password" ]]; then
+        # Heat does not support keystone trusts when deployed in
+        # standalone mode
+        die $LINENO \
+            'HEAT_DEFERRED_AUTH can only be set to "password" when HEAT_STANDALONE is True.'
+    fi
 else
-    HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN`
-    HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts}
+    HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
+    HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-}
 fi
+HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins}
+ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-}
 
 # Functions
 # ---------
@@ -117,31 +133,39 @@
     # logging
     iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
+    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ]  ; then
         # Add color to logging output
         setup_colorized_logging $HEAT_CONF DEFAULT tenant user
     fi
 
-    iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH
+    if [ ! -z "$HEAT_DEFERRED_AUTH" ]; then
+        iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH
+    fi
 
-    # NOTE(jamielennox): heat re-uses specific values from the
-    # keystone_authtoken middleware group and so currently fails when using the
-    # auth plugin setup. This should be fixed in heat.  Heat is also the only
-    # service that requires the auth_uri to include a /v2.0. Remove this custom
-    # setup when bug #1300246 is resolved.
-    iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
+    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        _config_heat_apache_wsgi
+    fi
+
     if [[ "$HEAT_STANDALONE" = "True" ]]; then
         iniset $HEAT_CONF paste_deploy flavor standalone
         iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s"
     else
-        iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-        iniset $HEAT_CONF keystone_authtoken admin_user heat
-        iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD
-        iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-        iniset $HEAT_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE
-        iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR
+        configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR
     fi
 
+    # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure
+    # the section for the client plugin associated with the trustee
+    if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then
+        iniset $HEAT_CONF trustee auth_plugin password
+        iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI
+        iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER
+        iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD
+        iniset $HEAT_CONF trustee user_domain_id $HEAT_TRUSTEE_DOMAIN
+    fi
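+    # With the defaults above this leaves a [trustee] section roughly like
+    # (values illustrative):
+    #   auth_plugin = password
+    #   auth_url = $KEYSTONE_AUTH_URI
+    #   username = heat
+    #   password = $SERVICE_PASSWORD
+    #   user_domain_id = default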
+
+    # clients_keystone
+    iniset $HEAT_CONF clients_keystone auth_uri $KEYSTONE_AUTH_URI
+
     # ec2authtoken
     iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
 
@@ -180,6 +204,35 @@
     # copy the default templates
     cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/
 
+    # Enable heat plugins.
+    # NOTE(nic): The symlink nonsense is necessary because when
+    # plugins are installed in "developer mode", the final component
+    # of their target directory is always "resources", which confuses
+    # Heat's plugin loader into believing that all plugins are named
+    # "resources", and therefore are all the same plugin; so it
+    # will only load one of them.  Linking them all to a common
+    # location with unique names avoids that type of collision,
+    # while still allowing the plugins to be edited in-tree.
+    local err_count=0
+
+    if [ -n "$ENABLE_HEAT_PLUGINS" ]; then
+        mkdir -p $HEAT_PLUGIN_DIR
+        # Clean up cruft from any previous runs
+        rm -f $HEAT_PLUGIN_DIR/*
+        iniset $HEAT_CONF DEFAULT plugin_dirs $HEAT_PLUGIN_DIR
+    fi
+
+    for heat_plugin in $ENABLE_HEAT_PLUGINS; do
+        if [ -d $HEAT_DIR/contrib/$heat_plugin ]; then
+            setup_package $HEAT_DIR/contrib/$heat_plugin -e
+            ln -s $HEAT_DIR/contrib/$heat_plugin/$heat_plugin/resources $HEAT_PLUGIN_DIR/$heat_plugin
+        else
+            : # clear retval on the test so that we can roll up errors
+            err $LINENO "Requested Heat plugin(${heat_plugin}) not found."
+            err_count=$(($err_count + 1))
+        fi
+    done
+    [ $err_count -eq 0 ] || die $LINENO "$err_count of the requested Heat plugins could not be installed."
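+    # Example: ENABLE_HEAT_PLUGINS="foo bar" (hypothetical plugin names)
+    # installs $HEAT_DIR/contrib/foo and $HEAT_DIR/contrib/bar in developer
+    # mode and links their resources dirs as $HEAT_PLUGIN_DIR/{foo,bar}.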
 }
 
 # init_heat() - Initialize database
@@ -211,6 +264,9 @@
 function install_heat {
     git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
     setup_develop $HEAT_DIR
+    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        install_apache_wsgi
+    fi
 }
 
 # install_heat_other() - Collect source and prepare
@@ -226,20 +282,106 @@
 # start_heat() - Start running processes, including screen
 function start_heat {
     run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF"
-    run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
-    run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
-    run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
+
+    # If the site is not enabled then we are in a grenade scenario
+    local enabled_site_file
+    enabled_site_file=$(apache_site_config_for heat-api)
+    if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        enable_apache_site heat-api
+        enable_apache_site heat-api-cfn
+        enable_apache_site heat-api-cloudwatch
+        restart_apache_server
+        tail_log heat-api /var/log/$APACHE_NAME/heat-api.log
+        tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log
+        tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log
+    else
+        run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
+        run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
+        run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
+    fi
 }
 
 # stop_heat() - Stop running processes
 function stop_heat {
     # Kill the screen windows
-    local serv
-    for serv in h-eng h-api h-api-cfn h-api-cw; do
-        stop_process $serv
-    done
+    stop_process h-eng
+
+    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
+        disable_apache_site heat-api
+        disable_apache_site heat-api-cfn
+        disable_apache_site heat-api-cloudwatch
+        restart_apache_server
+    else
+        local serv
+        for serv in h-api h-api-cfn h-api-cw; do
+            stop_process $serv
+        done
+    fi
+
 }
 
+# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
+function _cleanup_heat_apache_wsgi {
+    sudo rm -f $(apache_site_config_for heat-api)
+    sudo rm -f $(apache_site_config_for heat-api-cfn)
+    sudo rm -f $(apache_site_config_for heat-api-cloudwatch)
+}
+
+# _config_heat_apache_wsgi() - Set WSGI config files of Heat
+function _config_heat_apache_wsgi {
+
+    local heat_apache_conf
+    heat_apache_conf=$(apache_site_config_for heat-api)
+    local heat_cfn_apache_conf
+    heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn)
+    local heat_cloudwatch_apache_conf
+    heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch)
+    local heat_ssl=""
+    local heat_certfile=""
+    local heat_keyfile=""
+    local heat_api_port=$HEAT_API_PORT
+    local heat_cfn_api_port=$HEAT_API_CFN_PORT
+    local heat_cw_api_port=$HEAT_API_CW_PORT
+    local venv_path=""
+
+    sudo cp $FILES/apache-heat-api.template $heat_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$heat_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+        s|%SSLENGINE%|$heat_ssl|g;
+        s|%SSLCERTFILE%|$heat_certfile|g;
+        s|%SSLKEYFILE%|$heat_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $heat_apache_conf
+
+    sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$heat_cfn_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+        s|%SSLENGINE%|$heat_ssl|g;
+        s|%SSLCERTFILE%|$heat_certfile|g;
+        s|%SSLKEYFILE%|$heat_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $heat_cfn_apache_conf
+
+    sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$heat_cw_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
+        s|%SSLENGINE%|$heat_ssl|g;
+        s|%SSLCERTFILE%|$heat_certfile|g;
+        s|%SSLKEYFILE%|$heat_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+    " -i $heat_cloudwatch_apache_conf
+}
+
+
 # create_heat_accounts() - Set up common required heat accounts
 function create_heat_accounts {
     if [[ "$HEAT_STANDALONE" != "True" ]]; then
@@ -268,28 +410,13 @@
     fi
 
     if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then
-        # Note we have to pass token/endpoint here because the current endpoint and
-        # version negotiation in OSC means just --os-identity-api-version=3 won't work
-        D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KEYSTONE_SERVICE_URI_V3 \
-            --os-identity-api-version=3 domain list | grep ' heat ' | get_field 1)
-
-        if [[ -z "$D_ID" ]]; then
-            D_ID=$(openstack --os-token $OS_TOKEN --os-url=$KEYSTONE_SERVICE_URI_V3 \
-                --os-identity-api-version=3 domain create heat \
-                --description "Owns users and projects created by heat" \
-                | grep ' id ' | get_field 2)
-            iniset $HEAT_CONF DEFAULT stack_user_domain_id ${D_ID}
-
-            openstack --os-token $OS_TOKEN --os-url=$KEYSTONE_SERVICE_URI_V3 \
-                --os-identity-api-version=3 user create --password $SERVICE_PASSWORD \
-                --domain $D_ID heat_domain_admin \
-                --description "Manages users and projects created by heat"
-            openstack --os-token $OS_TOKEN --os-url=$KEYSTONE_SERVICE_URI_V3 \
-                --os-identity-api-version=3 role add \
-                --user heat_domain_admin --domain ${D_ID} admin
-            iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
-            iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
-        fi
+        # domain -> heat and user -> heat_domain_admin
+        domain_id=$(get_or_create_domain heat 'Owns users and projects created by heat')
+        iniset $HEAT_CONF DEFAULT stack_user_domain_id ${domain_id}
+        get_or_create_user heat_domain_admin $SERVICE_PASSWORD heat
+        get_or_add_user_domain_role admin heat_domain_admin heat
+        iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
+        iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
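+        # Roughly equivalent to the following illustrative OSC calls:
+        #   openstack domain create heat --description '...'
+        #   openstack user create --password $SERVICE_PASSWORD --domain heat heat_domain_admin
+        #   openstack role add --user heat_domain_admin --domain heat admin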
     fi
 }
 
@@ -321,7 +448,8 @@
 
     echo "</body></html>" >> $HEAT_PIP_REPO/index.html
 
-    local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
+    local heat_pip_repo_apache_conf
+    heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
 
     sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
     sudo sed -e "
@@ -335,7 +463,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_HEAT
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/horizon b/lib/horizon
index b2539d1..67181fc 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -19,7 +19,7 @@
 # - cleanup_horizon
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_HORIZON=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -49,7 +49,8 @@
         sed -e "/^$option/d" -i $local_settings
         echo -e "\n$option=$value" >> $file
     elif grep -q "^$section" $file; then
-        local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
+        local line
+        line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
         if [ -n "$line" ]; then
             sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file
         else
@@ -68,7 +69,8 @@
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_horizon {
-    local horizon_conf=$(apache_site_config_for horizon)
+    local horizon_conf
+    horizon_conf=$(apache_site_config_for horizon)
     sudo rm -f $horizon_conf
 }
 
@@ -97,13 +99,8 @@
 
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
 
-    if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
-        # Only Identity v3 API is available; then use it with v3 auth tokens
-        _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
-        _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\""
-    else
-        _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\""
-    fi
+    _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
+    _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\""
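+    # These settings are expected to surface in local_settings.py roughly
+    # as (values shown for an illustrative default http setup):
+    #   OPENSTACK_API_VERSIONS = {"identity": 3}
+    #   OPENSTACK_KEYSTONE_URL = "http://$SERVICE_HOST:5000/v3"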
 
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
@@ -112,7 +109,8 @@
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
-    local horizon_conf=$(apache_site_config_for horizon)
+    local horizon_conf
+    horizon_conf=$(apache_site_config_for horizon)
 
     # Configure apache to run horizon
     sudo sh -c "sed -e \"
@@ -195,7 +193,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_HORIZON
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/infra b/lib/infra
index ab32efe..cf003cc 100644
--- a/lib/infra
+++ b/lib/infra
@@ -15,7 +15,7 @@
 # - install_infra
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_INFRA=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -50,7 +50,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_INFRA
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/ironic b/lib/ironic
deleted file mode 100644
index 40475e0..0000000
--- a/lib/ironic
+++ /dev/null
@@ -1,843 +0,0 @@
-#!/bin/bash
-#
-# lib/ironic
-# Functions to control the configuration and operation of the **Ironic** service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
-# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
-# - ``SERVICE_HOST``
-# - ``KEYSTONE_TOKEN_FORMAT`` must be defined
-
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_ironic
-# - install_ironicclient
-# - init_ironic
-# - start_ironic
-# - stop_ironic
-# - cleanup_ironic
-
-# Save trace and pipefail settings
-XTRACE=$(set +o | grep xtrace)
-PIPEFAIL=$(set +o | grep pipefail)
-set +o xtrace
-set +o pipefail
-
-# Defaults
-# --------
-
-# Set up default directories
-GITDIR["python-ironicclient"]=$DEST/python-ironicclient
-GITDIR["ironic-lib"]=$DEST/ironic-lib
-
-IRONIC_DIR=$DEST/ironic
-IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent
-IRONIC_DATA_DIR=$DATA_DIR/ironic
-IRONIC_STATE_PATH=/var/lib/ironic
-IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic}
-IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic}
-IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf
-IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf
-IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json
-
-# Deploy callback timeout can be changed from its default (1800), if required.
-IRONIC_CALLBACK_TIMEOUT=${IRONIC_CALLBACK_TIMEOUT:-}
-
-# Deploy to hardware platform
-IRONIC_HW_NODE_CPU=${IRONIC_HW_NODE_CPU:-1}
-IRONIC_HW_NODE_RAM=${IRONIC_HW_NODE_RAM:-512}
-IRONIC_HW_NODE_DISK=${IRONIC_HW_NODE_DISK:-10}
-IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0}
-# The file is composed of multiple lines, each of which includes four
-# fields separated by white space: IPMI address, MAC address, IPMI
-# username and IPMI password.
-#
-#   192.168.110.107 00:1e:67:57:50:4c root otc123
-IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info}
-
-# Set up defaults for functional / integration testing
-IRONIC_NODE_UUID=${IRONIC_NODE_UUID:-`uuidgen`}
-IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts}
-IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates}
-IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS)
-IRONIC_ENABLED_DRIVERS=${IRONIC_ENABLED_DRIVERS:-fake,pxe_ssh,pxe_ipmitool}
-IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`}
-IRONIC_SSH_TIMEOUT=${IRONIC_SSH_TIMEOUT:-15}
-IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys}
-IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key}
-IRONIC_KEY_FILE=${IRONIC_KEY_FILE:-$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME}
-IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh}
-IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot}
-IRONIC_TFTPSERVER_IP=${IRONIC_TFTPSERVER_IP:-$HOST_IP}
-IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-22}
-IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP}
-IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1}
-IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1}
-IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-512}
-IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10}
-IRONIC_VM_EPHEMERAL_DISK=${IRONIC_VM_EPHEMERAL_DISK:-0}
-IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64}
-IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm}
-IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24}
-IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv}
-IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys}
-
-# By default, baremetal VMs will console output to file.
-IRONIC_VM_LOG_CONSOLE=${IRONIC_VM_LOG_CONSOLE:-True}
-IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/}
-
-# Use DIB to create deploy ramdisk and kernel.
-IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK)
-# If not using DIB, these files are used as the deploy ramdisk/kernel.
-# (The values must be absolute paths.)
-IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-}
-IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-}
-IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic}
-
-IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz}
-IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz}
-
-# Which deploy driver to use - valid choices right now
-# are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``.
-IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh}
-
-# TODO(agordeev): replace 'ubuntu' with the detected host distro name
-IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT}
-
-# Support entry points installation of console scripts
-IRONIC_BIN_DIR=$(get_python_exec_prefix)
-
-# Ironic connection info.  Note the port must be specified.
-IRONIC_SERVICE_PROTOCOL=http
-IRONIC_SERVICE_PORT=${IRONIC_SERVICE_PORT:-6385}
-IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:$IRONIC_SERVICE_PORT}
-
-# Enable iPXE
-IRONIC_IPXE_ENABLED=$(trueorfalse False IRONIC_IPXE_ENABLED)
-IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot}
-IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP}
-IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088}
-
-# NOTE(lucasagomes): This flag is used to differentiate the nodes that
-# use IPA as their deploy ramdisk from nodes that use the agent_* drivers
-# (which also use IPA but depend on Swift Temp URLs to work). At present,
-# all drivers that use the iSCSI approach for their deployment support
-# both IPA and bash ramdisks for the deployment. In the future we want
-# to remove support for the bash ramdisk in favor of IPA; once we get
-# there this flag can be removed, and all conditionals that use it
-# should just run by default.
-IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA)
-
-# get_pxe_boot_file() - Get the PXE/iPXE boot file path
-function get_pxe_boot_file {
-    local relpath=syslinux/pxelinux.0
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        relpath=ipxe/undionly.kpxe
-    fi
-
-    local pxe_boot_file
-    if is_ubuntu; then
-        pxe_boot_file=/usr/lib/$relpath
-    elif is_fedora || is_suse; then
-        pxe_boot_file=/usr/share/$relpath
-    fi
-
-    echo $pxe_boot_file
-}
-
-# PXE boot image
-IRONIC_PXE_BOOT_IMAGE=${IRONIC_PXE_BOOT_IMAGE:-$(get_pxe_boot_file)}
-
-
-# Functions
-# ---------
-
-# Test if any Ironic services are enabled
-# is_ironic_enabled
-function is_ironic_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0
-    return 1
-}
-
-function is_ironic_hardware {
-    is_ironic_enabled && [[ -n "${IRONIC_DEPLOY_DRIVER##*_ssh}" ]] && return 0
-    return 1
-}
-
-function is_deployed_by_agent {
-    [[ -z "${IRONIC_DEPLOY_DRIVER%%agent*}" ]] && return 0
-    return 1
-}
-
-function is_deployed_with_ipa_ramdisk {
-    is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0
-    return 1
-}
-
-# install_ironic() - Collect source and prepare
-function install_ironic {
-    # Make sure all needed services are enabled
-    local req_services="key"
-    if [[ "$VIRT_DRIVER" == "ironic" ]]; then
-        req_services+=" nova glance neutron"
-    fi
-    for srv in $req_services; do
-        if ! is_service_enabled "$srv"; then
-            die $LINENO "$srv should be enabled for Ironic."
-        fi
-    done
-
-    if use_library_from_git "ironic-lib"; then
-        git_clone_by_name "ironic-lib"
-        setup_dev_lib "ironic-lib"
-    fi
-
-    git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH
-    setup_develop $IRONIC_DIR
-
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        install_apache_wsgi
-    fi
-}
-
-# install_ironicclient() - Collect sources and prepare
-function install_ironicclient {
-    if use_library_from_git "python-ironicclient"; then
-        git_clone_by_name "python-ironicclient"
-        setup_dev_lib "python-ironicclient"
-        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ironicclient"]}/tools/,/etc/bash_completion.d/}ironic.bash_completion
-    else
-        # Nothing actually "requires" ironicclient, so force an install from PyPI
-        pip_install_gr python-ironicclient
-    fi
-}
-
-# _cleanup_ironic_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_ironic_apache_wsgi {
-    sudo rm -rf $IRONIC_HTTP_DIR
-    disable_apache_site ironic
-    sudo rm -f $(apache_site_config_for ironic)
-    restart_apache_server
-}
-
-# _config_ironic_apache_wsgi() - Set WSGI config files of Ironic
-function _config_ironic_apache_wsgi {
-    local ironic_apache_conf=$(apache_site_config_for ironic)
-    sudo cp $FILES/apache-ironic.template $ironic_apache_conf
-    sudo sed -e "
-        s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g;
-        s|%HTTPROOT%|$IRONIC_HTTP_DIR|g;
-    " -i $ironic_apache_conf
-    enable_apache_site ironic
-}
-
-# cleanup_ironic() - Remove residual data files, anything left over from previous
-# runs that would need to be cleaned up.
-function cleanup_ironic {
-    sudo rm -rf $IRONIC_AUTH_CACHE_DIR $IRONIC_CONF_DIR
-}
-
-# configure_ironic_dirs() - Create all directories required by Ironic and
-# associated services.
-function configure_ironic_dirs {
-    sudo install -d -o $STACK_USER $IRONIC_CONF_DIR $IRONIC_DATA_DIR \
-        $IRONIC_STATE_PATH $IRONIC_TFTPBOOT_DIR $IRONIC_TFTPBOOT_DIR/pxelinux.cfg
-    sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR
-
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        sudo install -d -o $STACK_USER -g $LIBVIRT_GROUP $IRONIC_HTTP_DIR
-    fi
-
-    if [ ! -f $IRONIC_PXE_BOOT_IMAGE ]; then
-        die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found."
-    fi
-
-    # Copy PXE binary
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR
-    else
-        # Syslinux >= 5.00 pxelinux.0 binary is not "stand-alone" anymore,
-        # it depends on some c32 modules to work correctly.
-        # More info: http://www.syslinux.org/wiki/index.php/Library_modules
-        cp -aR $(dirname $IRONIC_PXE_BOOT_IMAGE)/*.{c32,0} $IRONIC_TFTPBOOT_DIR
-    fi
-}
-
-# configure_ironic() - Set config files, create data dirs, etc
-function configure_ironic {
-    configure_ironic_dirs
-
-    # Copy over ironic configuration file and configure common parameters.
-    cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE
-    iniset $IRONIC_CONF_FILE DEFAULT debug True
-    inicomment $IRONIC_CONF_FILE DEFAULT log_file
-    iniset $IRONIC_CONF_FILE database connection `database_connection_url ironic`
-    iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH
-    iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG
-    # Configure Ironic conductor, if it was enabled.
-    if is_service_enabled ir-cond; then
-        configure_ironic_conductor
-    fi
-
-    # Configure Ironic API, if it was enabled.
-    if is_service_enabled ir-api; then
-        configure_ironic_api
-    fi
-
-    # Format logging
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then
-        setup_colorized_logging $IRONIC_CONF_FILE DEFAULT tenant user
-    fi
-
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then
-        _config_ironic_apache_wsgi
-    fi
-}
-
-# configure_ironic_api() - Is used by configure_ironic(). Performs
-# API specific configuration.
-function configure_ironic_api {
-    iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone
-    iniset $IRONIC_CONF_FILE oslo_policy policy_file $IRONIC_POLICY_JSON
-
-    # TODO(Yuki Nishiwaki): This is a temporary work-around until Ironic is fixed (bug #1422632).
-    # This code needs to be changed to use the configure_auth_token_middleware
-    # function once Ironic conforms to the new auth plugin.
-    iniset $IRONIC_CONF_FILE keystone_authtoken identity_uri $KEYSTONE_AUTH_URI
-    iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD
-    iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME
-    iniset $IRONIC_CONF_FILE keystone_authtoken cafile $SSL_BUNDLE_FILE
-    iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api
-
-    iniset_rpc_backend ironic $IRONIC_CONF_FILE
-    iniset $IRONIC_CONF_FILE api port $IRONIC_SERVICE_PORT
-
-    cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON
-}
-
-# configure_ironic_conductor() - Is used by configure_ironic().
-# Sets conductor specific settings.
-function configure_ironic_conductor {
-    cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF
-    cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR
-    local ironic_rootwrap=$(get_rootwrap_location ironic)
-    local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *"
-
-    # Set up the rootwrap sudoers for ironic
-    local tempfile=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile
-    chmod 0440 $tempfile
-    sudo chown root:root $tempfile
-    sudo mv $tempfile /etc/sudoers.d/ironic-rootwrap
-
-    iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF
-    iniset $IRONIC_CONF_FILE DEFAULT enabled_drivers $IRONIC_ENABLED_DRIVERS
-    iniset $IRONIC_CONF_FILE conductor api_url $IRONIC_SERVICE_PROTOCOL://$HOST_IP:$IRONIC_SERVICE_PORT
-    if [[ -n "$IRONIC_CALLBACK_TIMEOUT" ]]; then
-        iniset $IRONIC_CONF_FILE conductor deploy_callback_timeout $IRONIC_CALLBACK_TIMEOUT
-    fi
-    iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP
-    iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR
-    iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images
-
-    local pxe_params=""
-    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
-        pxe_params+="nofb nomodeset vga=normal console=ttyS0"
-        if is_deployed_with_ipa_ramdisk; then
-            pxe_params+=" systemd.journald.forward_to_console=yes"
-        fi
-    fi
-    # When booting with less than 1GB, we need to switch from default tmpfs
-    # to ramfs for ramdisks to decompress successfully.
-    if (is_ironic_hardware && [[ "$IRONIC_HW_NODE_RAM" -lt 1024 ]]) ||
-        (! is_ironic_hardware && [[ "$IRONIC_VM_SPECS_RAM" -lt 1024 ]]); then
-        pxe_params+=" rootfstype=ramfs"
-    fi
-    if [[ -n "$pxe_params" ]]; then
-        iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params"
-    fi
-
-    if is_deployed_by_agent; then
-        if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then
-            iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY
-        else
-            die $LINENO "SWIFT_ENABLE_TEMPURLS must be True to use agent_ssh driver in Ironic."
-        fi
-        iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080}
-        iniset $IRONIC_CONF_FILE glance swift_api_version v1
-        local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default)
-        iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id}
-        iniset $IRONIC_CONF_FILE glance swift_container glance
-        iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600
-        iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30
-        iniset $IRONIC_CONF_FILE agent agent_erase_devices_priority 0
-    fi
-
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        local pxebin=`basename $IRONIC_PXE_BOOT_IMAGE`
-        iniset $IRONIC_CONF_FILE pxe ipxe_enabled True
-        iniset $IRONIC_CONF_FILE pxe pxe_config_template '\$pybasedir/drivers/modules/ipxe_config.template'
-        iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin
-        iniset $IRONIC_CONF_FILE pxe http_root $IRONIC_HTTP_DIR
-        iniset $IRONIC_CONF_FILE pxe http_url "http://$IRONIC_HTTP_SERVER:$IRONIC_HTTP_PORT"
-    fi
-}
-
-# create_ironic_cache_dir() - Part of the init_ironic() process
-function create_ironic_cache_dir {
-    # Create cache dir
-    sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api
-    sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api
-    rm -f $IRONIC_AUTH_CACHE_DIR/api/*
-    sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/registry
-    sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/registry
-    rm -f $IRONIC_AUTH_CACHE_DIR/registry/*
-}
-
-# create_ironic_accounts() - Set up common required ironic accounts
-
-# Tenant               User       Roles
-# ------------------------------------------------------------------
-# service              ironic     admin        # if enabled
-function create_ironic_accounts {
-
-    # Ironic
-    if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then
-        # Get the ironic user if it exists
-
-        # NOTE(Shrews): This user MUST have admin level privileges!
-        create_service_user "ironic" "admin"
-
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
-
-            get_or_create_service "ironic" "baremetal" "Ironic baremetal provisioning service"
-            get_or_create_endpoint "baremetal" \
-                "$REGION_NAME" \
-                "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
-                "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \
-                "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT"
-        fi
-    fi
-}
-
-
-# init_ironic() - Initialize databases, etc.
-function init_ironic {
-    # Save private network as cleaning network
-    local cleaning_network_uuid
-    cleaning_network_uuid=$(neutron net-list | grep private | get_field 1)
-    iniset $IRONIC_CONF_FILE neutron cleaning_network_uuid ${cleaning_network_uuid}
-
-    # (Re)create ironic database
-    recreate_database ironic
-
-    # Migrate ironic database
-    $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE
-
-    create_ironic_cache_dir
-}
-
-# _ironic_bm_vm_names() - Generates list of names for baremetal VMs.
-function _ironic_bm_vm_names {
-    local idx
-    local num_vms=$(($IRONIC_VM_COUNT - 1))
-    for idx in $(seq 0 $num_vms); do
-        echo "baremetal${IRONIC_VM_NETWORK_BRIDGE}_${idx}"
-    done
-}
-
-# start_ironic() - Start running processes, including screen
-function start_ironic {
-    # Start Ironic API server, if enabled.
-    if is_service_enabled ir-api; then
-        start_ironic_api
-    fi
-
-    # Start Ironic conductor, if enabled.
-    if is_service_enabled ir-cond; then
-        start_ironic_conductor
-    fi
-
-    # Start Apache if iPXE is enabled
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        restart_apache_server
-    fi
-}
-
-# start_ironic_api() - Used by start_ironic().
-# Starts Ironic API server.
-function start_ironic_api {
-    run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE"
-    echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..."
-    if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT; do sleep 1; done"; then
-        die $LINENO "ir-api did not start"
-    fi
-}
-
-# start_ironic_conductor() - Used by start_ironic().
-# Starts Ironic conductor.
-function start_ironic_conductor {
-    run_process ir-cond "$IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE"
-    # TODO(romcheg): Find a way to check whether the conductor has started.
-}
-
-# stop_ironic() - Stop running processes
-function stop_ironic {
-    stop_process ir-api
-    stop_process ir-cond
-
-    # Cleanup the WSGI files
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        _cleanup_ironic_apache_wsgi
-    fi
-}
-
-function create_ovs_taps {
-    local ironic_net_id=$(neutron net-list | grep private | get_field 1)
-
-    # Workaround: no netns exists on the host until a Neutron port is created.
-    # We need to create one in Neutron to know what netns to tap into prior to
-    # the first node booting.
-    local port_id=$(neutron port-create private | grep " id " | get_field 2)
-
-    # Intentional sleep to make sure the tag has been set on the port
-    sleep 10
-
-    if  [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-        local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
-    else
-        local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-)
-    fi
-    local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-)
-
-    # Make sure the veth pair does not already exist; otherwise delete its links
-    sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1
-    sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1
-    # create veth pair for future interconnection between br-int and brbm
-    sudo ip link add brbm-tap1 type veth peer name ovs-tap1
-    sudo ip link set dev brbm-tap1 up
-    sudo ip link set dev ovs-tap1 up
-
-    sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$tag_id
-    sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1
-
-    # Remove the port needed only for workaround.
-    neutron port-delete $port_id
-
-    # Finally, share the fixed tenant network across all tenants.  This allows the host
-    # to serve TFTP to a single network namespace via the tap device created above.
-    neutron net-update $ironic_net_id --shared true
-}
-
-function create_bridge_and_vms {
-    # Call libvirt setup scripts in a new shell to ensure any new group membership
-    sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network"
-    if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then
-        local log_arg="$IRONIC_VM_LOG_DIR"
-    else
-        local log_arg=""
-    fi
-    local vm_name
-    for vm_name in $(_ironic_bm_vm_names); do
-        sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-node $vm_name \
-            $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \
-            amd64 $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR \
-            $log_arg" >> $IRONIC_VM_MACS_CSV_FILE
-    done
-    create_ovs_taps
-}
-
-function wait_for_nova_resources {
-    # After nodes have been enrolled, we need to wait for both ironic and
-    # nova's periodic tasks to populate the resource tracker with available
-    # nodes and resources. Wait up to 2 minutes for a given resource before
-    # timing out.
-    local resource=$1
-    local expected_count=$2
-    echo_summary "Waiting 2 minutes for Nova resource tracker to pick up $resource >= $expected_count"
-    for i in $(seq 1 120); do
-        if [ $(nova hypervisor-stats | grep " $resource " | get_field 2) -ge $expected_count ]; then
-            return 0
-        fi
-        sleep 1
-    done
-    die $LINENO "Timed out waiting for Nova hypervisor-stats $resource >= $expected_count"
-}
-
-function enroll_nodes {
-    local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2)
-
-    if ! is_ironic_hardware; then
-        local ironic_node_cpu=$IRONIC_VM_SPECS_CPU
-        local ironic_node_ram=$IRONIC_VM_SPECS_RAM
-        local ironic_node_disk=$IRONIC_VM_SPECS_DISK
-        local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK
-        local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE
-        local node_options="\
-            -i deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID \
-            -i deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID \
-            -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \
-            -i ssh_address=$IRONIC_VM_SSH_ADDRESS \
-            -i ssh_port=$IRONIC_VM_SSH_PORT \
-            -i ssh_username=$IRONIC_SSH_USERNAME \
-            -i ssh_key_filename=$IRONIC_KEY_FILE"
-    else
-        local ironic_node_cpu=$IRONIC_HW_NODE_CPU
-        local ironic_node_ram=$IRONIC_HW_NODE_RAM
-        local ironic_node_disk=$IRONIC_HW_NODE_DISK
-        local ironic_ephemeral_disk=$IRONIC_HW_EPHEMERAL_DISK
-        if [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then
-            local ironic_hwinfo_file=$IRONIC_IPMIINFO_FILE
-        fi
-    fi
-
-    local total_nodes=0
-    local total_cpus=0
-    while read hardware_info; do
-        if ! is_ironic_hardware; then
-            local mac_address=$hardware_info
-        elif [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then
-            local ipmi_address=$(echo $hardware_info |awk  '{print $1}')
-            local mac_address=$(echo $hardware_info |awk '{print $2}')
-            local ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}')
-            local ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}')
-            # Currently we require all hardware platforms to have the same
-            # CPU/RAM/DISK info; in the future this can be enhanced to support
-            # different types, and then we create the bare metal flavor with
-            # the minimum values
-            local node_options="-i ipmi_address=$ipmi_address -i ipmi_password=$ironic_ipmi_passwd\
-                -i ipmi_username=$ironic_ipmi_username"
-            node_options+=" -i deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID"
-            node_options+=" -i deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID"
-        fi
-
-        # The first node created will be used for testing in the ironic
-        # w/o glance scenario, so we need to know its UUID.
-        local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID")
-
-        local node_id=$(ironic node-create $standalone_node_uuid\
-            --chassis_uuid $chassis_id \
-            --driver $IRONIC_DEPLOY_DRIVER \
-            --name node-$total_nodes \
-            -p cpus=$ironic_node_cpu\
-            -p memory_mb=$ironic_node_ram\
-            -p local_gb=$ironic_node_disk\
-            -p cpu_arch=x86_64 \
-            $node_options \
-            | grep " uuid " | get_field 2)
-
-        ironic port-create --address $mac_address --node $node_id
-
-        total_nodes=$((total_nodes+1))
-        total_cpus=$((total_cpus+$ironic_node_cpu))
-    done < $ironic_hwinfo_file
-
-    # create the nova flavor
-    # NOTE(adam_g): Attempting to use an autogenerated UUID for flavor id here uncovered
-    # bug (LP: #1333852) in Trove.  This can be changed to use an auto flavor id when the
-    # bug is fixed in Juno.
-    local adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk))
-    nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal 551 $ironic_node_ram $adjusted_disk $ironic_node_cpu
-
-    nova flavor-key baremetal set "cpu_arch"="x86_64"
-
-    if [ "$VIRT_DRIVER" == "ironic" ]; then
-        wait_for_nova_resources "count" $total_nodes
-        wait_for_nova_resources "vcpus" $total_cpus
-    fi
-}
-
-function configure_iptables {
-    # enable tftp natting for allowing connections to HOST_IP's tftp server
-    sudo modprobe nf_conntrack_tftp
-    sudo modprobe nf_nat_tftp
-    # nodes boot from TFTP and callback to the API server listening on $HOST_IP
-    sudo iptables -I INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true
-    sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
-    if is_deployed_by_agent; then
-        # agent ramdisk gets instance image from swift
-        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true
-    fi
-
-    if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then
-        sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true
-    fi
-}
-
-function configure_tftpd {
-    # Stop tftpd and set up serving via xinetd
-    stop_service tftpd-hpa || true
-    [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override
-    sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp
-    sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp
-
-    # Set up tftp file mapping to satisfy requests at the root (booting) and
-    # the /tftpboot/ sub-dir (as per the deploy-ironic elements)
-    echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file
-    echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file
-
-    chmod -R 0755 $IRONIC_TFTPBOOT_DIR
-    restart_service xinetd
-}
-
-function configure_ironic_ssh_keypair {
-    if [[ ! -d $HOME/.ssh ]]; then
-        mkdir -p $HOME/.ssh
-        chmod 700 $HOME/.ssh
-    fi
-    if [[ ! -e $IRONIC_KEY_FILE ]]; then
-        if [[ ! -d $(dirname $IRONIC_KEY_FILE) ]]; then
-            mkdir -p $(dirname $IRONIC_KEY_FILE)
-        fi
-        echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE
-    fi
-    cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE
-}
-
-function ironic_ssh_check {
-    local key_file=$1
-    local floating_ip=$2
-    local port=$3
-    local default_instance_user=$4
-    local active_timeout=$5
-    if ! timeout $active_timeout sh -c "while ! ssh -p $port -o StrictHostKeyChecking=no -i $key_file ${default_instance_user}@$floating_ip echo success; do sleep 1; done"; then
-        die $LINENO "server didn't become ssh-able!"
-    fi
-}
-
-function configure_ironic_auxiliary {
-    configure_ironic_ssh_keypair
-    ironic_ssh_check $IRONIC_KEY_FILE $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME $IRONIC_SSH_TIMEOUT
-}
-
-function build_ipa_coreos_ramdisk {
-    echo "Building ironic-python-agent deploy ramdisk"
-    local kernel_path=$1
-    local ramdisk_path=$2
-    git_clone $IRONIC_PYTHON_AGENT_REPO $IRONIC_PYTHON_AGENT_DIR $IRONIC_PYTHON_AGENT_BRANCH
-    cd $IRONIC_PYTHON_AGENT_DIR
-    imagebuild/coreos/build_coreos_image.sh
-    cp imagebuild/coreos/UPLOAD/coreos_production_pxe_image-oem.cpio.gz $ramdisk_path
-    cp imagebuild/coreos/UPLOAD/coreos_production_pxe.vmlinuz $kernel_path
-    sudo rm -rf UPLOAD
-    cd -
-}
-
-# build deploy kernel+ramdisk, then upload them to glance
-# this function sets ``IRONIC_DEPLOY_KERNEL_ID``, ``IRONIC_DEPLOY_RAMDISK_ID``
-function upload_baremetal_ironic_deploy {
-    declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID
-    echo_summary "Creating and uploading baremetal images for ironic"
-
-    # install diskimage-builder
-    if [[ $(type -P ramdisk-image-create) == "" ]]; then
-        pip_install_gr "diskimage-builder"
-    fi
-
-    if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then
-        local IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.kernel
-        local IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.initramfs
-    else
-        local IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL
-        local IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK
-    fi
-
-    if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! -e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then
-        # files don't exist, need to build them
-        if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then
-            # we can build them only if we're not offline
-            if [ "$OFFLINE" != "True" ]; then
-                if is_deployed_with_ipa_ramdisk; then
-                    build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH
-                else
-                    ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \
-                        -o $TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER
-                fi
-            else
-                die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode"
-            fi
-        else
-            if is_deployed_with_ipa_ramdisk; then
-                # download the agent image tarball
-                wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH
-                wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH
-            else
-                die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK"
-            fi
-        fi
-    fi
-
-    local token=$(openstack token issue -c id -f value)
-    die_if_not_set $LINENO token "Keystone failed to get token"
-
-    # load them into glance
-    IRONIC_DEPLOY_KERNEL_ID=$(openstack \
-        --os-token $token \
-        --os-url http://$GLANCE_HOSTPORT \
-        image create \
-        $(basename $IRONIC_DEPLOY_KERNEL_PATH) \
-        --public --disk-format=aki \
-        --container-format=aki \
-        < $IRONIC_DEPLOY_KERNEL_PATH  | grep ' id ' | get_field 2)
-    IRONIC_DEPLOY_RAMDISK_ID=$(openstack \
-        --os-token $token \
-        --os-url http://$GLANCE_HOSTPORT \
-        image create \
-        $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \
-        --public --disk-format=ari \
-        --container-format=ari \
-        < $IRONIC_DEPLOY_RAMDISK_PATH  | grep ' id ' | get_field 2)
-}
-
-function prepare_baremetal_basic_ops {
-    if ! is_ironic_hardware; then
-        configure_ironic_auxiliary
-    fi
-    upload_baremetal_ironic_deploy
-    if ! is_ironic_hardware; then
-        create_bridge_and_vms
-    fi
-    enroll_nodes
-    configure_tftpd
-    configure_iptables
-}
-
-function cleanup_baremetal_basic_ops {
-    rm -f $IRONIC_VM_MACS_CSV_FILE
-    if [ -f $IRONIC_KEY_FILE ]; then
-        local key=$(cat $IRONIC_KEY_FILE.pub)
-        # remove public key from authorized_keys
-        grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE
-        chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE
-    fi
-    sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH
-
-    local vm_name
-    for vm_name in $(_ironic_bm_vm_names); do
-        sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-node $vm_name $IRONIC_VM_NETWORK_BRIDGE"
-    done
-
-    sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override
-    restart_service xinetd
-    sudo iptables -D INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true
-    sudo iptables -D INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true
-    if is_deployed_by_agent; then
-        # agent ramdisk gets instance image from swift
-        sudo iptables -D INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true
-    fi
-    sudo rmmod nf_conntrack_tftp || true
-    sudo rmmod nf_nat_tftp || true
-}
-
-# Restore xtrace + pipefail
-$XTRACE
-$PIPEFAIL
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/keystone b/lib/keystone
index ec28b46..d033a94 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -28,7 +28,7 @@
 # - _cleanup_keystone_apache_wsgi
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_KEYSTONE=$(set +o | grep xtrace)
 set +o xtrace
 
 # Defaults
@@ -52,10 +52,6 @@
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 
-# Set up additional extensions, such as oauth1, federation
-# Example of KEYSTONE_EXTENSIONS=oauth1,federation
-KEYSTONE_EXTENSIONS=${KEYSTONE_EXTENSIONS:-}
-
 # Toggle for deploying Keystone under HTTPD + mod_wsgi
 KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
 
@@ -132,7 +128,8 @@
 
 # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone
 function _config_keystone_apache_wsgi {
-    local keystone_apache_conf=$(apache_site_config_for keystone)
+    local keystone_apache_conf
+    keystone_apache_conf=$(apache_site_config_for keystone)
     local keystone_ssl=""
     local keystone_certfile=""
     local keystone_keyfile=""
@@ -191,8 +188,6 @@
         inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0
     fi
 
-    configure_keystone_extensions
-
     # Rewrite stock ``keystone.conf``
 
     if is_service_enabled ldap; then
@@ -217,8 +212,6 @@
 
     iniset_rpc_backend keystone $KEYSTONE_CONF
 
-    iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
-
     # Register SSL certificates if provided
     if is_ssl_enabled_service key; then
         ensure_certificates KEYSTONE
@@ -232,6 +225,9 @@
         # Set the service ports for a proxy to take the originals
         iniset $KEYSTONE_CONF eventlet_server public_port $KEYSTONE_SERVICE_PORT_INT
         iniset $KEYSTONE_CONF eventlet_server admin_port $KEYSTONE_AUTH_PORT_INT
+
+        iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI
+        iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI
     fi
 
     iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN"
@@ -253,9 +249,9 @@
 
         # Add swift endpoints to service catalog if swift is enabled
         if is_service_enabled s-proxy; then
-            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG
-            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/" >> $KEYSTONE_CATALOG
+            echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG
             echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG
         fi
 
@@ -295,35 +291,17 @@
         iniset $KEYSTONE_CONF DEFAULT logging_debug_format_suffix "%(funcName)s %(pathname)s:%(lineno)d"
         iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(process)d TRACE %(name)s %(instance)s"
         _config_keystone_apache_wsgi
+    else
+        iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST"
+        iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS"
+        # Public workers will use the server default, typically the number of CPUs.
     fi
 
     iniset $KEYSTONE_CONF DEFAULT max_token_size 16384
 
-    iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS"
-    # Public workers will use the server default, typically number of CPU.
-
     iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/"
 }
 
-function configure_keystone_extensions {
-    # Add keystone extension into keystone v3 application pipeline
-    local extension_value
-    local api_v3
-    local extension
-    local api_v3_extension
-    for extension_value in ${KEYSTONE_EXTENSIONS//,/ }; do
-        if [[ -z "${extension_value}" ]]; then
-            continue
-        fi
-        api_v3=$(iniget $KEYSTONE_PASTE_INI pipeline:api_v3 pipeline)
-        extension=$(echo $api_v3 | sed -ne "/${extension_value}/ p;" )
-        if [[ -z $extension ]]; then
-            api_v3_extension=$(echo $api_v3 | sed -ne "s/service_v3/${extension_value}_extension service_v3/p;" )
-            iniset $KEYSTONE_PASTE_INI pipeline:api_v3 pipeline "$api_v3_extension"
-        fi
-    done
-}
-
 # create_keystone_accounts() - Sets up common required keystone accounts
 
 # Tenant               User       Roles
@@ -347,9 +325,12 @@
 function create_keystone_accounts {
 
     # admin
-    local admin_tenant=$(get_or_create_project "admin" default)
-    local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
-    local admin_role=$(get_or_create_role "admin")
+    local admin_tenant
+    admin_tenant=$(get_or_create_project "admin" default)
+    local admin_user
+    admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default)
+    local admin_role
+    admin_role=$(get_or_create_role "admin")
     get_or_add_user_project_role $admin_role $admin_user $admin_tenant
 
     # Create service project/role
@@ -365,18 +346,23 @@
     get_or_create_role ResellerAdmin
 
     # The Member role is used by Horizon and Swift so we need to keep it:
-    local member_role=$(get_or_create_role "Member")
+    local member_role
+    member_role=$(get_or_create_role "Member")
 
     # another_role demonstrates that an arbitrary role may be created and used
     # TODO(sleepsonthefloor): show how this can be used for rbac in the future!
-    local another_role=$(get_or_create_role "anotherrole")
+    local another_role
+    another_role=$(get_or_create_role "anotherrole")
 
     # invisible tenant - admin can't see this one
-    local invis_tenant=$(get_or_create_project "invisible_to_admin" default)
+    local invis_tenant
+    invis_tenant=$(get_or_create_project "invisible_to_admin" default)
 
     # demo
-    local demo_tenant=$(get_or_create_project "demo" default)
-    local demo_user=$(get_or_create_user "demo" \
+    local demo_tenant
+    demo_tenant=$(get_or_create_project "demo" default)
+    local demo_user
+    demo_user=$(get_or_create_user "demo" \
         "$ADMIN_PASSWORD" "default" "demo@example.com")
 
     get_or_add_user_project_role $member_role $demo_user $demo_tenant
@@ -384,9 +370,11 @@
     get_or_add_user_project_role $another_role $demo_user $demo_tenant
     get_or_add_user_project_role $member_role $demo_user $invis_tenant
 
-    local admin_group=$(get_or_create_group "admins" \
+    local admin_group
+    admin_group=$(get_or_create_group "admins" \
         "default" "openstack admin group")
-    local non_admin_group=$(get_or_create_group "nonadmins" \
+    local non_admin_group
+    non_admin_group=$(get_or_create_group "nonadmins" \
         "default" "non-admin group")
 
     get_or_add_group_project_role $member_role $non_admin_group $demo_tenant
@@ -415,7 +403,8 @@
 function create_service_user {
     local role=${2:-service}
 
-    local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
+    local user
+    user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default)
     get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME"
 }
 
@@ -432,7 +421,7 @@
     local signing_dir=$3
     local section=${4:-keystone_authtoken}
 
-    iniset $conf_file $section auth_plugin password
+    iniset $conf_file $section auth_type password
     iniset $conf_file $section auth_url $KEYSTONE_AUTH_URI
     iniset $conf_file $section username $admin_user
     iniset $conf_file $section password $SERVICE_PASSWORD
@@ -457,14 +446,6 @@
     # Initialize keystone database
     $KEYSTONE_BIN_DIR/keystone-manage db_sync
 
-    local extension_value
-    for extension_value in ${KEYSTONE_EXTENSIONS//,/ }; do
-        if [[ -z "${extension_value}" ]]; then
-            continue
-        fi
-        $KEYSTONE_BIN_DIR/keystone-manage db_sync --extension "${extension_value}"
-    done
-
     if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then
         # Set up certificates
         rm -rf $KEYSTONE_CONF_DIR/ssl
@@ -580,7 +561,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_KEYSTONE
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
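
The rename from a shared XTRACE to a per-file _XTRACE_KEYSTONE matters because
these library files source one another: with a single variable name, a nested
file's save overwrites the caller's, and the caller then restores the wrong
state. The pattern itself, as a standalone sketch (_XTRACE_MYLIB is a
placeholder name):

    # Save the current xtrace setting ("set -o xtrace" or "set +o xtrace"),
    # silence tracing while this file's functions are defined, then restore.
    _XTRACE_MYLIB=$(set +o | grep xtrace)
    set +o xtrace

    # ... function definitions that should not be traced while sourcing ...

    $_XTRACE_MYLIB    # re-executes the saved "set [+-]o xtrace" command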
diff --git a/lib/ldap b/lib/ldap
index d2dbc3b..65056ae 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -8,7 +8,7 @@
 # - install_ldap()
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LDAP=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -82,7 +82,8 @@
 function init_ldap {
     local keystone_ldif
 
-    local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+    local tmp_ldap_dir
+    tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
 
     # Remove data but not schemas
     clear_ldap_state
@@ -113,7 +114,8 @@
     echo "Installing LDAP inside function"
     echo "os_VENDOR is $os_VENDOR"
 
-    local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
+    local tmp_ldap_dir
+    tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX)
 
     printf "installing OpenLDAP"
     if is_ubuntu; then
@@ -129,7 +131,8 @@
     fi
 
     echo "LDAP_PASSWORD is $LDAP_PASSWORD"
-    local slappass=$(slappasswd -s $LDAP_PASSWORD)
+    local slappass
+    slappass=$(slappasswd -s $LDAP_PASSWORD)
     printf "LDAP secret is $slappass\n"
 
     # Create manager.ldif and add to olcdb
@@ -163,7 +166,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LDAP
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/lvm b/lib/lvm
index 8afd543..ae6023a 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -16,7 +16,7 @@
 
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_LVM=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -56,7 +56,8 @@
 
     # If the backing physical device is a loop device, it was probably setup by DevStack
     if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
-        local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
+        local vg_dev
+        vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
         sudo losetup -d $vg_dev
         rm -f $backing_file
     fi
@@ -89,7 +90,8 @@
     if ! sudo vgs $vg; then
         # Only create if the file doesn't already exist
         [[ -f $backing_file ]] || truncate -s $size $backing_file
-        local vg_dev=`sudo losetup -f --show $backing_file`
+        local vg_dev
+        vg_dev=`sudo losetup -f --show $backing_file`
 
         # Only create volume group if it doesn't already exist
         if ! sudo vgs $vg; then
@@ -180,7 +182,7 @@
 }
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_LVM
 
 # mode: shell-script
 # End:
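
For reference, the loop-device lifecycle this library manages can be reproduced
by hand; a minimal sketch (paths, names and sizes are illustrative):

    # Create a sparse backing file and attach it to the first free loop device
    truncate -s 10G /opt/stack/data/stack-volumes-backing-file
    vg_dev=$(sudo losetup -f --show /opt/stack/data/stack-volumes-backing-file)

    # Build the volume group on the loop device
    sudo vgcreate stack-volumes "$vg_dev"

    # Teardown: find the loop device from its backing file, detach, remove
    vg_dev=$(sudo losetup -j /opt/stack/data/stack-volumes-backing-file | awk -F':' '{ print $1 }')
    sudo losetup -d "$vg_dev"
    rm -f /opt/stack/data/stack-volumes-backing-file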
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index e67bd4a..692d5ea 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -112,6 +112,12 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
+# Default provider for load balancer service
+DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
+
+# Default provider for VPN service
+DEFAULT_VPN_PROVIDER=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
+
 # Agent binaries.  Note, binary paths for other agents are set in per-service
 # scripts in lib/neutron_plugins/services/
 AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
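
The two provider strings follow neutron's service_provider format,
<service_type>:<name>:<driver_class>[:default], where the trailing "default"
marks the provider chosen when a request does not name one. Later hunks write
them into the service config with iniset, roughly:

    iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers \
        service_provider $DEFAULT_LB_PROVIDER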
@@ -144,8 +150,6 @@
 Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
 # Default auth strategy
 Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# Use namespace or not
-Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True}
 # RHEL's support for namespaces requires using veths with ovs
 Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
 Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
@@ -167,6 +171,9 @@
 
 ## Provider Network Information
 PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"}
+IPV6_PROVIDER_SUBNET_NAME=${IPV6_PROVIDER_SUBNET_NAME:-"provider_net_v6"}
+IPV6_PROVIDER_FIXED_RANGE=${IPV6_PROVIDER_FIXED_RANGE:-}
+IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-}
 
 # Define the public bridge that will transmit traffic from VMs to the
 # physical network - used by both the OVS and Linux Bridge drivers.
@@ -208,7 +215,7 @@
 # The plugin supports L3.
 Q_L3_ENABLED=${Q_L3_ENABLED:-False}
 # L3 routers exist per tenant
-Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False}
+Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True}
 
 # List of config file names in addition to the main plugin config file
 # See _configure_neutron_common() for details about setting it up
@@ -258,7 +265,7 @@
 
 # If using GRE tunnels for tenant networks, specify the range of
 # tunnel IDs from which tenant networks are allocated. Can be
-# overriden in ``localrc`` in necesssary.
+# overridden in ``localrc`` if necessary.
 TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
 
 # To use VLANs for tenant networks, set to True in localrc. VLANs
@@ -359,7 +366,7 @@
 fi
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -468,19 +475,13 @@
 function create_nova_conf_neutron {
     iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API"
 
-
-    if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
-        iniset $NOVA_CONF neutron auth_plugin "v3password"
-        iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
-        iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
-        iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
-        iniset $NOVA_CONF neutron user_domain_name "default"
-    else
-        iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME"
-        iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD"
-        iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0"
-        iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME"
-    fi
+    iniset $NOVA_CONF neutron auth_plugin "v3password"
+    iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
+    iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME"
+    iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD"
+    iniset $NOVA_CONF neutron user_domain_name "Default"
+    iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME"
+    iniset $NOVA_CONF neutron project_domain_name "Default"
     iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY"
     iniset $NOVA_CONF neutron region_name "$REGION_NAME"
     iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT"
@@ -494,7 +495,6 @@
     # optionally set options in nova_conf
     neutron_plugin_create_nova_conf
 
-    iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER"
     if is_service_enabled q-meta; then
         iniset $NOVA_CONF neutron service_metadata_proxy "True"
     fi
@@ -545,23 +545,25 @@
 
     if is_provider_network; then
         die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
-        die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specifiy the PROVIDER_NETWORK_TYPE"
+        die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
         NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
-            SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+            SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY --subnetpool None $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID"
         fi
 
-        if [[ "$IP_VERSION" =~ .*6 ]]; then
-            SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2)
-            die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID"
+        if [[ "$IP_VERSION" =~ .*6 ]] && [[ -n "$IPV6_PROVIDER_FIXED_RANGE" ]] && [[ -n "$IPV6_PROVIDER_NETWORK_GATEWAY" ]]; then
+            SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME --subnetpool None $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2)
+            die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $TENANT_ID"
         fi
 
-        sudo ip link set $OVS_PHYSICAL_BRIDGE up
-        sudo ip link set br-int up
-        sudo ip link set $PUBLIC_INTERFACE up
+        if [[ $Q_AGENT == "openvswitch" ]]; then
+            sudo ip link set $OVS_PHYSICAL_BRIDGE up
+            sudo ip link set br-int up
+            sudo ip link set $PUBLIC_INTERFACE up
+        fi
     else
         NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $TENANT_ID"
@@ -647,7 +649,7 @@
         plugin_dir=$($ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location")
 
         # install neutron plugins to dom0
-        tar -czf - -C $NEUTRON_DIR/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ |
+        tar -czf - -C $NEUTRON_DIR/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ |
             $ssh_dom0 "tar -xzf - -C $plugin_dir && chmod a+x $plugin_dir/*"
     fi
 }
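
The dom0 copy uses the stream-a-tarball-over-ssh idiom, which avoids writing an
intermediate archive on either side; in isolation (host and paths are
illustrative):

    # Stream a gzipped tar of ./plugins and unpack it on the remote end
    tar -czf - -C ./plugins ./ | \
        ssh root@dom0 "tar -xzf - -C /etc/xapi.d/plugins && chmod a+x /etc/xapi.d/plugins/*"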
@@ -707,7 +709,7 @@
 function start_neutron_l2_agent {
     run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
 
-    if is_provider_network; then
+    if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
         sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
         sudo ip link set $OVS_PHYSICAL_BRIDGE up
         sudo ip link set br-int up
@@ -781,6 +783,10 @@
     if is_service_enabled q-metering; then
         neutron_metering_stop
     fi
+
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || :
+    fi
 }
 
 # stop_neutron() - Stop running processes (non-screen)
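
Note the "|| :" on the pkill line above: pkill exits non-zero when no process
matches, and ":" is the shell no-op, so the statement can never fail under
"set -e" or an ERR trap. The same idiom in isolation (daemon name illustrative):

    sudo pkill -9 -f my-rootwrap-daemon || :    # equivalent to `|| true`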
@@ -796,7 +802,8 @@
     local from_intf=$1
     local to_intf=$2
     local add_ovs_port=$3
-    local af=$4
+    local del_ovs_port=$4
+    local af=$5
 
     if [[ -n "$from_intf" && -n "$to_intf" ]]; then
         # Remove the primary IP address from $from_intf and add it to $to_intf,
@@ -806,31 +813,35 @@
 
         local IP_ADD=""
         local IP_DEL=""
-        local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }")
+        local IP_UP=""
+        local DEFAULT_ROUTE_GW
+        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }")
         local ADD_OVS_PORT=""
+        local DEL_OVS_PORT=""
 
-        if [[ $af == "inet" ]]; then
-            IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IP | awk '{ print $2, $3, $4; exit }')
-        fi
-
-        if [[ $af == "inet6" ]]; then
-            IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IPV6 | awk '{ print $2, $3, $4; exit }')
-        fi
+        IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
 
         if [ "$DEFAULT_ROUTE_GW" != "" ]; then
-            ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
+            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
         fi
 
         if [[ "$add_ovs_port" == "True" ]]; then
             ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
         fi
 
+        if [[ "$del_ovs_port" == "True" ]]; then
+            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
+        fi
+
         if [[ "$IP_BRD" != "" ]]; then
             IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
             IP_ADD="sudo ip addr add $IP_BRD dev $to_intf"
+            IP_UP="sudo ip link set $to_intf up"
         fi
 
-        $IP_DEL; $IP_ADD; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
+        # The add/del OVS port calls have to happen either before or
+        # after the address is moved in order to not leave it orphaned.
+        $DEL_OVS_PORT; $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE
     fi
 }
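
With the new fourth argument, callers now state both OVS actions explicitly;
the positional order is from_intf, to_intf, add_ovs_port, del_ovs_port, af, as
in the calls appearing later in this patch:

    # Setup: move addresses onto the bridge and add the OVS port
    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"

    # Cleanup: move them back and delete the OVS port
    _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"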
 
@@ -838,18 +849,24 @@
 # runs that a clean run would need to clean up
 function cleanup_neutron {
 
-    _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet"
+    if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
+        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
 
-    if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6"
-    fi
+        if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+            # ip(8) wants the prefix length when deleting
+            local v6_gateway
+            v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
+            sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
+            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
+        fi
 
-    if is_provider_network && is_ironic_hardware; then
-        for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
-            sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
-            sudo ip addr add $IP dev $PUBLIC_INTERFACE
-        done
-        sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+        if is_provider_network && is_ironic_hardware; then
+            for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
+                sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
+                sudo ip addr add $IP dev $PUBLIC_INTERFACE
+            done
+            sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+        fi
     fi
 
     if is_neutron_ovs_base_plugin; then
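
As the new comment notes, "ip -6 addr del" requires the prefix length, which is
why the address is re-read from the interface instead of using the bare gateway
variable. Standalone, with an illustrative device and address:

    # Look up the address as configured (includes the /prefix), then delete it
    v6_gw=$(ip -6 addr show dev br-ex | awk '/2001:db8::2/ { print $2 }')   # e.g. "2001:db8::2/64"
    sudo ip -6 addr del $v6_gw dev br-ex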
@@ -878,7 +895,10 @@
 function _configure_neutron_common {
     _create_neutron_conf_dir
 
-    cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF
+    # Uses oslo config generator to generate core sample configuration files
+    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
+
+    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
 
     Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
     cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
@@ -903,7 +923,9 @@
     Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
     # NOTE(hichihara): Some neutron vendor plugins were already decomposed and
     # have no config file in the Neutron tree; each such plugin should provide its own file.
-    if [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
+    if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
+        cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
+    elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
         cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
     fi
 
@@ -911,6 +933,8 @@
     iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
     iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
     iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
+    iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
+
     # If additional config files are set, make sure their path names are set as well
     if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then
         die $LINENO "Neutron additional plugin config not set.. exiting"
@@ -967,11 +991,10 @@
         return
     fi
 
-    cp $NEUTRON_DIR/etc/l3_agent.ini $NEUTRON_TEST_CONFIG_FILE
+    cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_TEST_CONFIG_FILE
 
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False
     iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False
-    iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -984,11 +1007,10 @@
 
 function _configure_neutron_dhcp_agent {
 
-    cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE
+    cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
 
     iniset $Q_DHCP_CONF_FILE DEFAULT verbose True
     iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -1012,18 +1034,15 @@
 
 function _configure_neutron_l3_agent {
     Q_L3_ENABLED=True
-    # for l3-agent, only use per tenant router if we have namespaces
-    Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE
 
     if is_service_enabled q-vpn; then
         neutron_vpn_configure_agent
     fi
 
-    cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE
+    cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE
 
     iniset $Q_L3_CONF_FILE DEFAULT verbose True
     iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE
     iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -1033,15 +1052,15 @@
 
     neutron_plugin_configure_l3_agent
 
-    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet"
+    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
 
     if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6"
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
     fi
 }
 
 function _configure_neutron_metadata_agent {
-    cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE
+    cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
 
     iniset $Q_META_CONF_FILE DEFAULT verbose True
     iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
@@ -1050,11 +1069,6 @@
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
     fi
-
-    # Configures keystone for metadata_agent
-    # The third argument "True" sets auth_url needed to communicate with keystone
-    _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True
-
 }
 
 function _configure_neutron_ceilometer_notifications {
@@ -1062,8 +1076,12 @@
 }
 
 function _configure_neutron_lbaas {
-    if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then
-        cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR
+    # Uses oslo config generator to generate LBaaS sample configuration files
+    (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh)
+
+    if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample ]; then
+        cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_CONF_DIR/neutron_lbaas.conf
+        iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers service_provider $DEFAULT_LB_PROVIDER
     fi
     neutron_agent_lbaas_configure_common
     neutron_agent_lbaas_configure_agent
@@ -1083,8 +1101,11 @@
 }
 
 function _configure_neutron_vpn {
-    if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf ]; then
-        cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf $NEUTRON_CONF_DIR
+    # Uses oslo config generator to generate VPNaaS sample configuration files
+    (cd $NEUTRON_VPNAAS_DIR && exec ./tools/generate_config_file_samples.sh)
+    if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample ]; then
+        cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample $NEUTRON_CONF_DIR/neutron_vpnaas.conf
+        iniset $NEUTRON_CONF_DIR/neutron_vpnaas.conf service_providers service_provider $DEFAULT_VPN_PROVIDER
     fi
     neutron_vpn_install_agent_packages
     neutron_vpn_configure_common
@@ -1133,7 +1154,7 @@
     iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
     _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken
 
-    # Configuration for neutron notifations to nova.
+    # Configuration for neutron notifications to nova.
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
 
@@ -1165,6 +1186,9 @@
 
 # _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
 function _neutron_deploy_rootwrap_filters {
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        return
+    fi
     local srcdir=$1
     sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
     sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
@@ -1212,17 +1236,10 @@
     fi
 }
 
-# Configures keystone integration for neutron service and agents
+# Configures keystone integration for neutron service
 function _neutron_setup_keystone {
     local conf_file=$1
     local section=$2
-    local use_auth_url=$3
-
-    # Configures keystone for metadata_agent
-    # metadata_agent needs auth_url to communicate with keystone
-    if [[ "$use_auth_url" == "True" ]]; then
-        iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI/v2.0
-    fi
 
     create_neutron_cache_dir
     configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section
@@ -1243,8 +1260,10 @@
     subnet_params+="--ip_version 4 "
     subnet_params+="--gateway $NETWORK_GATEWAY "
     subnet_params+="--name $PRIVATE_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$NET_ID $FIXED_RANGE"
-    local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
+    local subnet_id
+    subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID"
     echo $subnet_id
 }
@@ -1258,8 +1277,10 @@
     subnet_params+="--ip_version 6 "
     subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
     subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
-    local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
+    local ipv6_subnet_id
+    ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID"
     echo $ipv6_subnet_id
 }
@@ -1270,9 +1291,11 @@
     subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} "
     subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
     subnet_params+="--name $PUBLIC_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
     subnet_params+="-- --enable_dhcp=False"
-    local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    local id_and_ext_gw_ip
+    id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
     echo $id_and_ext_gw_ip
 }
@@ -1282,9 +1305,11 @@
     local subnet_params="--ip_version 6 "
     subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY "
     subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
+    subnet_params+="--subnetpool None "
     subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
     subnet_params+="-- --enable_dhcp=False"
-    local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    local ipv6_id_and_ext_gw_ip
+    ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
     echo $ipv6_id_and_ext_gw_ip
 }
@@ -1293,8 +1318,10 @@
 function _neutron_configure_router_v4 {
     neutron router-interface-add $ROUTER_ID $SUBNET_ID
     # Create a public subnet on the external network
-    local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
-    local ext_gw_ip=$(echo $id_and_ext_gw_ip  | get_field 2)
+    local id_and_ext_gw_ip
+    id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
+    local ext_gw_ip
+    ext_gw_ip=$(echo $id_and_ext_gw_ip  | get_field 2)
     PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
     # Configure the external network as the default router gateway
     neutron router-gateway-set $ROUTER_ID $EXT_NET_ID
@@ -1303,7 +1330,7 @@
     if is_service_enabled q-l3; then
         # Configure and enable public bridge
         local ext_gw_interface="none"
-        if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
+        if is_neutron_ovs_base_plugin; then
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
         elif [[ "$Q_AGENT" = "linuxbridge" ]]; then
             # Search for the brq device the neutron router and network for $FIXED_RANGE
@@ -1319,7 +1346,7 @@
                 sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
                 sudo ip link set $ext_gw_interface up
             fi
-            ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'`
+            ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f3 -d\" | tr '\n' ' '`
             die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
             sudo ip route replace  $FIXED_RANGE via $ROUTER_GW_IP
         fi
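
The reworked ROUTER_GW_IP extraction no longer depends on column positions
inside the fixed_ips JSON; it keeps whatever follows "ip_address" and takes the
third double-quoted field. Against an illustrative port-list row (client output
formats vary between releases):

    row='| {"subnet_id": "a1b2...", "ip_address": "172.24.4.2"} | network:router_gateway |'
    echo "$row" | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' '
    # -> 172.24.4.2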
@@ -1331,9 +1358,12 @@
 function _neutron_configure_router_v6 {
     neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
     # Create a public subnet on the external network
-    local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
-    local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
-    local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
+    local ipv6_id_and_ext_gw_ip
+    ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
+    local ipv6_ext_gw_ip
+    ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2)
+    local ipv6_pub_subnet_id
+    ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5)
 
     # If the external network has not already been set as the default router
     # gateway when configuring an IPv4 public subnet, do so now
@@ -1347,11 +1377,12 @@
         sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
-        IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'`
+        IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' '`
         die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
-        if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then
-            local ext_gw_interface=$(_neutron_get_ext_gw_interface)
+        if is_neutron_ovs_base_plugin; then
+            local ext_gw_interface
+            ext_gw_interface=$(_neutron_get_ext_gw_interface)
             local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
 
             # Configure interface for public bridge
@@ -1364,7 +1395,7 @@
 
 # Explicitly set router id in l3 agent configuration
 function _neutron_set_router_id {
-    if [[ "$Q_USE_NAMESPACE" == "False" ]]; then
+    if [[ "$Q_L3_ROUTER_PER_TENANT" == "False" ]]; then
         iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID
     fi
 }
@@ -1488,7 +1519,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_NEUTRON
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index 4166131..586ded7 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -4,7 +4,7 @@
 # ------------------------------------
 
 # Save trace setting
-BS_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -58,9 +58,9 @@
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
     if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then
-        iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver
+        iniset $conf_file DEFAULT interface_driver ivs
     else
-        iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+        iniset $conf_file DEFAULT interface_driver openvswitch
     fi
 }
 
@@ -75,4 +75,4 @@
 }
 
 # Restore xtrace
-$BS_XTRACE
+$_XTRACE_NEUTRON_BIGSWITCH
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index 557b94d..6ba0a66 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -4,7 +4,7 @@
 # ----------------------
 
 # Save trace setting
-BRCD_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_BROCADE=$(set +o | grep xtrace)
 set +o xtrace
 
 function is_neutron_ovs_base_plugin {
@@ -68,7 +68,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver linuxbridge
 }
 
 function has_neutron_plugin_security_group {
@@ -81,4 +81,4 @@
 }
 
 # Restore xtrace
-$BRCD_XTRACE
+$_XTRACE_NEUTRON_BROCADE
diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco
index 90dcd57..fc2cb8a 100644
--- a/lib/neutron_plugins/cisco
+++ b/lib/neutron_plugins/cisco
@@ -4,7 +4,7 @@
 # ---------------------------
 
 # Save trace setting
-CISCO_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_CISCO=$(set +o | grep xtrace)
 set +o xtrace
 
 # Specify the VSM parameters
@@ -150,8 +150,8 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver openvswitch
 }
 
 # Restore xtrace
-$CISCO_XTRACE
+$_XTRACE_NEUTRON_CISCO
diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane
index 6b4819e..385dab8 100644
--- a/lib/neutron_plugins/embrane
+++ b/lib/neutron_plugins/embrane
@@ -4,13 +4,14 @@
 # ---------------------------
 
 # Save trace setting
-EMBR_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_EMBR=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch
 
 function save_function {
-    local ORIG_FUNC=$(declare -f $1)
+    local ORIG_FUNC
+    ORIG_FUNC=$(declare -f $1)
     local NEW_FUNC="$2${ORIG_FUNC#$1}"
     eval "$NEW_FUNC"
 }
@@ -38,4 +39,5 @@
 }
 
 # Restore xtrace
-$EMBR_XTRACE
+$_XTRACE_NEUTRON_EMBR
+
diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm
deleted file mode 100644
index dd5cfa6..0000000
--- a/lib/neutron_plugins/ibm
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/bash
-#
-# Neutron IBM SDN-VE plugin
-# ---------------------------
-
-# Save trace setting
-IBM_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-function neutron_plugin_install_agent_packages {
-    _neutron_ovs_base_install_agent_packages
-}
-
-function _neutron_interface_setup {
-    # Setup one interface on the integration bridge if needed
-    # The plugin agent to be used if more than one interface is used
-    local bridge=$1
-    local interface=$2
-    sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface
-}
-
-function neutron_setup_integration_bridge {
-    # Setup integration bridge if needed
-    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
-        neutron_ovs_base_cleanup
-        _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE
-        if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
-            interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ })
-            _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]}
-        fi
-    fi
-
-    # Set controller to SDNVE controller (1st of list) if exists
-    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
-        # Get the first controller
-        controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ })
-        SDNVE_IP=${controllers[0]}
-        sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP
-    fi
-}
-
-function neutron_plugin_create_nova_conf {
-    # if n-cpu is enabled, then setup integration bridge
-    if is_service_enabled n-cpu; then
-        neutron_setup_integration_bridge
-    fi
-}
-
-function is_neutron_ovs_base_plugin {
-    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
-        # Yes, we use OVS.
-        return 0
-    else
-        # No, we do not use OVS.
-        return 1
-    fi
-}
-
-function neutron_plugin_configure_common {
-    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm
-    Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini
-    Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2"
-}
-
-function neutron_plugin_configure_service {
-    # Define extra "SDNVE" configuration options when q-svc is configured
-
-    iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
-
-    if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS
-    fi
-
-    if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE
-    fi
-
-    if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE
-    fi
-
-    if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND
-    fi
-
-    if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS
-    fi
-
-    if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER
-    fi
-
-
-    iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier
-
-}
-
-function neutron_plugin_configure_plugin_agent {
-    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent"
-}
-
-function neutron_plugin_configure_debug_command {
-    :
-}
-
-function neutron_plugin_setup_interface_driver {
-    return 0
-}
-
-function has_neutron_plugin_security_group {
-    # Does not support Security Groups
-    return 1
-}
-
-function neutron_ovs_base_cleanup {
-    if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then
-        # remove all OVS ports that look like Neutron created ports
-        for port in $(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do
-            sudo ovs-vsctl del-port ${port}
-        done
-
-        # remove integration bridge created by Neutron
-        for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do
-            sudo ovs-vsctl del-br ${bridge}
-        done
-    fi
-}
-
-# Restore xtrace
-$IBM_XTRACE
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index bd4438d..096722b 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -4,7 +4,7 @@
 # -----------------------------
 
 # Save trace setting
-PLUGIN_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_LB=$(set +o | grep xtrace)
 set +o xtrace
 
 function neutron_lb_cleanup {
@@ -85,7 +85,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver linuxbridge
 }
 
 function neutron_plugin_check_adv_test_requirements {
@@ -93,4 +93,4 @@
 }
 
 # Restore xtrace
-$PLUGIN_XTRACE
+$_XTRACE_NEUTRON_LB
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index ace5335..30e1b03 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -4,7 +4,7 @@
 # ------------------------------
 
 # Save trace setting
-ML2_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_ML2=$(set +o | grep xtrace)
 set +o xtrace
 
 # Enable this to simply and quickly enable tunneling with ML2.
@@ -137,4 +137,4 @@
 }
 
 # Restore xtrace
-$ML2_XTRACE
+$_XTRACE_NEUTRON_ML2
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index 9e5307b..61e634e 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -4,7 +4,7 @@
 # ----------------------
 
 # Save trace setting
-NU_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_NU=$(set +o | grep xtrace)
 set +o xtrace
 
 function neutron_plugin_create_nova_conf {
@@ -66,4 +66,4 @@
 }
 
 # Restore xtrace
-$NU_XTRACE
+$_XTRACE_NEUTRON_NU
diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence
deleted file mode 100644
index 0c570e5..0000000
--- a/lib/neutron_plugins/oneconvergence
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-#
-# Neutron One Convergence plugin
-# ------------------------------
-
-# Save trace setting
-OC_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-source $TOP_DIR/lib/neutron_plugins/ovs_base
-
-Q_L3_ENABLED=true
-Q_L3_ROUTER_PER_TENANT=true
-Q_USE_NAMESPACE=true
-
-function neutron_plugin_install_agent_packages {
-    _neutron_ovs_base_install_agent_packages
-}
-# Configure common parameters
-function neutron_plugin_configure_common {
-
-    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence
-    Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini
-    Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2"
-}
-
-# Configure plugin specific information
-function neutron_plugin_configure_service {
-    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP
-    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT
-    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER
-    iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD
-}
-
-function neutron_plugin_configure_debug_command {
-    _neutron_ovs_base_configure_debug_command
-}
-
-function neutron_plugin_setup_interface_driver {
-    local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
-}
-
-function has_neutron_plugin_security_group {
-    # 1 means False here
-    return 0
-}
-
-function setup_integration_bridge {
-    _neutron_ovs_base_setup_bridge $OVS_BRIDGE
-}
-
-function neutron_plugin_configure_dhcp_agent {
-    setup_integration_bridge
-    iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
-}
-
-function neutron_plugin_configure_l3_agent {
-    _neutron_ovs_base_configure_l3_agent
-    iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
-}
-
-function neutron_plugin_configure_plugin_agent {
-
-    AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent"
-
-    _neutron_ovs_base_configure_firewall_driver
-}
-
-function neutron_plugin_create_nova_conf {
-    if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then
-        setup_integration_bridge
-    fi
-}
-
-# Restore xtrace
-$OC_XTRACE
diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch
index 891ab49..130eaac 100644
--- a/lib/neutron_plugins/openvswitch
+++ b/lib/neutron_plugins/openvswitch
@@ -7,7 +7,7 @@
 # which has been removed in Juno.
 
 # Save trace setting
-OVS_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_OVS=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/openvswitch_agent
@@ -56,4 +56,5 @@
 }
 
 # Restore xtrace
-$OVS_XTRACE
+$_XTRACE_NEUTRON_OVS
+
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 48e47b3..b1acacd 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -4,7 +4,7 @@
 # -----------------------------
 
 # Save trace setting
-OVSA_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_OVSL2=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/neutron_plugins/ovs_base
@@ -71,6 +71,9 @@
         # Make a copy of our config for domU
         sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU"
 
+        # change ownership of domU's config file to STACK_USER
+        sudo chown $STACK_USER:$STACK_USER /$Q_PLUGIN_CONF_FILE.domU
+
         # Deal with Dom0's L2 Agent:
         Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE"
 
@@ -82,7 +85,14 @@
         # Under XS/XCP, the ovs agent needs to target the dom0
         # integration bridge.  This is enabled by using a root wrapper
         # that executes commands on dom0 via a XenAPI plugin.
+        # XenAPI does not currently support the rootwrap daemon, so set root_helper_daemon empty
         iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND"
+        iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon ""
+
+        # Disable minimize polling so that OVS and port changes are always detected.
+        # This is a known issue with xenserver + neutron; the bug is tracked at
+        # https://bugs.launchpad.net/neutron/+bug/1495423
+        iniset /$Q_PLUGIN_CONF_FILE agent minimize_polling False
 
         # Set "physical" mapping
         iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
@@ -95,10 +105,14 @@
         # Create a bridge "br-$GUEST_INTERFACE_DEFAULT"
         _neutron_ovs_base_add_bridge "br-$GUEST_INTERFACE_DEFAULT"
         # Add $GUEST_INTERFACE_DEFAULT to that bridge
-        sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
+        sudo ovs-vsctl -- --may-exist add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT
+
+        # Create external bridge and add port
+        _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
+        sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE_DEFAULT
 
         # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT"
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT,physnet-ex:$PUBLIC_BRIDGE"
         # Set integration bridge to domU's
         iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE
         # Set root wrap
@@ -110,7 +124,7 @@
 
 function neutron_plugin_setup_interface_driver {
     local conf_file=$1
-    iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver
+    iniset $conf_file DEFAULT interface_driver openvswitch
 }
 
 function neutron_plugin_check_adv_test_requirements {
@@ -118,4 +132,4 @@
 }
 
 # Restore xtrace
-$OVSA_XTRACE
+$_XTRACE_NEUTRON_OVSL2
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 849d0b7..59c7737 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -4,7 +4,7 @@
 # -------------------------------------
 
 # Save trace setting
-OVSB_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace)
 set +o xtrace
 
 OVS_BRIDGE=${OVS_BRIDGE:-br-int}
@@ -49,8 +49,10 @@
 
 function _neutron_ovs_base_install_ubuntu_dkms {
     # install Dynamic Kernel Module Support packages if needed
-    local kernel_version=$(uname -r)
-    local kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
+    local kernel_version
+    kernel_version=$(uname -r)
+    local kernel_major_minor
+    kernel_major_minor=`echo $kernel_version | cut -d. -f1-2`
     # From kernel 3.13 on, openvswitch-datapath-dkms is not needed
     if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then
         install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version"
@@ -113,4 +115,4 @@
 }
 
 # Restore xtrace
-$OVSB_XTRACE
+$_XTRACE_NEUTRON_OVS_BASE
diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid
deleted file mode 100644
index 0d711fe..0000000
--- a/lib/neutron_plugins/plumgrid
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/bin/bash
-#
-# PLUMgrid Neutron Plugin
-# Edgar Magana emagana@plumgrid.com
-# ------------------------------------
-
-# Save trace settings
-PG_XTRACE=$(set +o | grep xtrace)
-set +o xtrace
-
-function neutron_plugin_create_nova_conf {
-    :
-}
-
-function neutron_plugin_setup_interface_driver {
-    :
-}
-
-function neutron_plugin_configure_common {
-    Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid
-    Q_PLUGIN_CONF_FILENAME=plumgrid.ini
-    Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2"
-    PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost}
-    PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766}
-    PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username}
-    PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password}
-    PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70}
-    PLUMGRID_DRIVER=${PLUMGRID_DRIVER:-neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib}
-}
-
-function neutron_plugin_configure_service {
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server $PLUMGRID_DIRECTOR_IP
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server_port $PLUMGRID_DIRECTOR_PORT
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector username $PLUMGRID_ADMIN
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector password $PLUMGRID_PASSWORD
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector servertimeout $PLUMGRID_TIMEOUT
-    iniset /$Q_PLUGIN_CONF_FILE plumgriddirector driver $PLUMGRID_DRIVER
-}
-
-function neutron_plugin_configure_debug_command {
-    :
-}
-
-function is_neutron_ovs_base_plugin {
-    # False
-    return 1
-}
-
-function has_neutron_plugin_security_group {
-    # return 0 means enabled
-    return 0
-}
-
-function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
-}
-# Restore xtrace
-$PG_XTRACE
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
index 61a148e..2b7f32d 100644
--- a/lib/neutron_plugins/services/firewall
+++ b/lib/neutron_plugins/services/firewall
@@ -1,8 +1,10 @@
+#!/bin/bash
+
 # Neutron firewall plugin
 # ---------------------------
 
 # Save trace setting
-FW_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_FIREWALL=$(set +o | grep xtrace)
 set +o xtrace
 
 FWAAS_PLUGIN=neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin
@@ -12,8 +14,11 @@
 }
 
 function neutron_fwaas_configure_driver {
+    # Uses oslo config generator to generate FWaaS sample configuration files
+    (cd $NEUTRON_FWAAS_DIR && exec ./tools/generate_config_file_samples.sh)
+
     FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini
-    cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME
+    cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini.sample $FWAAS_DRIVER_CONF_FILENAME
 
     iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True
     iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver"
@@ -24,4 +29,4 @@
 }
 
 # Restore xtrace
-$FW_XTRACE
+$_XTRACE_NEUTRON_FIREWALL
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
index 34190f9..30e9480 100644
--- a/lib/neutron_plugins/services/loadbalancer
+++ b/lib/neutron_plugins/services/loadbalancer
@@ -1,8 +1,10 @@
+#!/bin/bash
+
 # Neutron loadbalancer plugin
 # ---------------------------
 
 # Save trace setting
-LB_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_LB=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -26,7 +28,7 @@
 
     LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
 
-    cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME
+    cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME
 
     # ovs_use_veth needs to be set before the plugin configuration
     # occurs to allow plugins to override the setting.
@@ -46,4 +48,4 @@
 }
 
 # Restore xtrace
-$LB_XTRACE
+$_XTRACE_NEUTRON_LB
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 37ba019..5b32468 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -1,8 +1,10 @@
+#!/bin/bash
+
 # Neutron metering plugin
 # ---------------------------
 
 # Save trace setting
-METER_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_METER=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -19,7 +21,7 @@
 
     METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini"
 
-    cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME
+    cp $NEUTRON_DIR/etc/metering_agent.ini.sample $METERING_AGENT_CONF_FILENAME
 }
 
 function neutron_metering_stop {
@@ -27,4 +29,5 @@
 }
 
 # Restore xtrace
-$METER_XTRACE
+$_XTRACE_NEUTRON_METER
+
diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn
index 4d6a2bf..e790913 100644
--- a/lib/neutron_plugins/services/vpn
+++ b/lib/neutron_plugins/services/vpn
@@ -1,8 +1,10 @@
+#!/bin/bash
+
 # Neutron VPN plugin
 # ---------------------------
 
 # Save trace setting
-VPN_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_VPN=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -27,7 +29,9 @@
 }
 
 function neutron_vpn_configure_agent {
-    cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE
+    # Uses oslo config generator to generate VPNaaS sample configuration files
+    (cd $NEUTRON_VPNAAS_DIR && exec ./tools/generate_config_file_samples.sh)
+    cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini.sample $Q_VPN_CONF_FILE
     if [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then
         iniset_multiline $Q_VPN_CONF_FILE vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver
         if is_fedora; then
@@ -51,4 +55,4 @@
 }
 
 # Restore xtrace
-$VPN_XTRACE
+$_XTRACE_NEUTRON_VPN
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
index e3f4689..45a4f2e 100644
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ b/lib/neutron_thirdparty/bigswitch_floodlight
@@ -4,7 +4,7 @@
 # ------------------------------------------
 
 # Save trace setting
-BS3_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace)
 set +o xtrace
 
 BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
@@ -51,4 +51,4 @@
 }
 
 # Restore xtrace
-$BS3_XTRACE
+$_XTRACE_NEUTRON_BIGSWITCH
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
index 03853a9..e182fca 100644
--- a/lib/neutron_thirdparty/vmware_nsx
+++ b/lib/neutron_thirdparty/vmware_nsx
@@ -1,2 +1,4 @@
+#!/bin/bash
+
 # REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx'
 # continues to work.
diff --git a/lib/nova b/lib/nova
index 9830276..c97f517 100644
--- a/lib/nova
+++ b/lib/nova
@@ -7,6 +7,7 @@
 #
 # - ``functions`` file
 # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``FILES``
 # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined
 # - ``LIBVIRT_TYPE`` must be defined
 # - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined
@@ -24,7 +25,7 @@
 # - cleanup_nova
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LIB_NOVA=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -87,6 +88,7 @@
 NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
 EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773}
 EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773}
+METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
 
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
@@ -96,6 +98,10 @@
 # should work in most cases.
 SCHEDULER=${SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
 
+# The following FILTERS list contains the default scheduler filters plus
+# SameHostFilter and DifferentHostFilter.
+FILTERS="RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 # Set default defaults here as some hypervisor drivers override these
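
SameHostFilter and DifferentHostFilter only act when a boot request carries the
matching scheduler hint naming existing instances; with the list above enabled,
usage looks roughly like this (image, flavor and UUID are illustrative):

    # Schedule next to an existing instance (consumed by SameHostFilter)
    nova boot --image cirros-0.3.4-x86_64-uec --flavor m1.tiny \
        --hint same_host=$INSTANCE_UUID same-host-vm

    # Or keep it on a different host (DifferentHostFilter)
    nova boot --image cirros-0.3.4-x86_64-uec --flavor m1.tiny \
        --hint different_host=$INSTANCE_UUID other-host-vm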
@@ -202,14 +208,16 @@
         clean_iptables
 
         # Destroy old instances
-        local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
+        local instances
+        instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"`
         if [ ! "$instances" = "" ]; then
             echo $instances | xargs -n1 sudo virsh destroy || true
             echo $instances | xargs -n1 sudo virsh undefine --managed-save || true
         fi
 
         # Logout and delete iscsi sessions
-        local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
+        local tgts
+        tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2)
         local target
         for target in $tgts; do
             sudo iscsiadm --mode node -T $target --logout || true
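
The declare-then-assign splits above (and in the hunks that follow) are deliberate: "local var=$(cmd)" returns the exit status of "local", which is always 0, so a failing command substitution goes unnoticed. A minimal demonstration:

#!/bin/bash
# Demonstrates why "local v; v=$(cmd)" is preferred over
# "local v=$(cmd)": the one-liner masks the command's exit status.
local_masks() {
    local out=$(false)
    echo "one-liner: exit status is $?"      # prints 0 -- failure hidden
}
local_preserves() {
    local out
    out=$(false)
    echo "split form: exit status is $?"     # prints 1 -- failure visible
}
local_masks
local_preserves
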
@@ -238,20 +246,22 @@
 function _cleanup_nova_apache_wsgi {
     sudo rm -f $NOVA_WSGI_DIR/*
     sudo rm -f $(apache_site_config_for nova-api)
-    sudo rm -f $(apache_site_config_for nova-ec2-api)
+    sudo rm -f $(apache_site_config_for nova-metadata)
 }
 
 # _config_nova_apache_wsgi() - Set WSGI config files of Keystone
 function _config_nova_apache_wsgi {
     sudo mkdir -p $NOVA_WSGI_DIR
 
-    local nova_apache_conf=$(apache_site_config_for nova-api)
-    local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api)
+    local nova_apache_conf
+    nova_apache_conf=$(apache_site_config_for nova-api)
+    local nova_metadata_apache_conf
+    nova_metadata_apache_conf=$(apache_site_config_for nova-metadata)
     local nova_ssl=""
     local nova_certfile=""
     local nova_keyfile=""
     local nova_api_port=$NOVA_SERVICE_PORT
-    local nova_ec2_api_port=$EC2_SERVICE_PORT
+    local nova_metadata_port=$METADATA_SERVICE_PORT
     local venv_path=""
 
     if is_ssl_enabled_service nova-api; then
@@ -265,7 +275,7 @@
 
     # copy proxy vhost and wsgi helper files
     sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api
-    sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py $NOVA_WSGI_DIR/nova-ec2-api
+    sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata
 
     sudo cp $FILES/apache-nova-api.template $nova_apache_conf
     sudo sed -e "
@@ -280,18 +290,18 @@
         s|%APIWORKERS%|$API_WORKERS|g
     " -i $nova_apache_conf
 
-    sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf
+    sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf
     sudo sed -e "
-        s|%PUBLICPORT%|$nova_ec2_api_port|g;
+        s|%PUBLICPORT%|$nova_metadata_port|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-ec2-api|g;
+        s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-metadata|g;
         s|%SSLENGINE%|$nova_ssl|g;
         s|%SSLCERTFILE%|$nova_certfile|g;
         s|%SSLKEYFILE%|$nova_keyfile|g;
         s|%USER%|$STACK_USER|g;
         s|%VIRTUALENV%|$venv_path|g
         s|%APIWORKERS%|$API_WORKERS|g
-    " -i $nova_ec2_apache_conf
+    " -i $nova_metadata_apache_conf
 }
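
The vhost templates above are instantiated by a single sed program whose "s|%TOKEN%|value|g" clauses are separated by semicolons. A throwaway sketch of the same pattern (the template file and substituted values are made up):

#!/bin/bash
# Throwaway sketch of the %TOKEN% template substitution used above.
cat > /tmp/demo.template <<'EOF'
Listen %PUBLICPORT%
WSGIScriptAlias / %PUBLICWSGI%
EOF
sed -e "
    s|%PUBLICPORT%|8775|g;
    s|%PUBLICWSGI%|/var/www/nova-metadata|g;
" -i /tmp/demo.template
cat /tmp/demo.template   # both tokens are now replaced
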
 
 # configure_nova() - Set config files, create data dirs, etc
@@ -437,17 +447,6 @@
             # swift through the s3 api.
             get_or_add_user_project_role ResellerAdmin nova $SERVICE_TENANT_NAME
         fi
-
-        # EC2
-        if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then
-
-            get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer"
-            get_or_create_endpoint "ec2" \
-                "$REGION_NAME" \
-                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
-                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \
-                "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
-        fi
     fi
 
     # S3
@@ -471,7 +470,6 @@
 
     # (Re)create ``nova.conf``
     rm -f $NOVA_CONF
-    iniset $NOVA_CONF DEFAULT verbose "True"
     iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
     if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then
         iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True"
@@ -479,6 +477,7 @@
     iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI"
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
     iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
+    iniset $NOVA_CONF DEFAULT scheduler_default_filters "$FILTERS"
     iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF"
     iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
@@ -494,7 +493,6 @@
     iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
     iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
-    iniset $NOVA_CONF DEFAULT ec2_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT s3_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
 
@@ -601,12 +599,10 @@
     fi
 
     iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST"
-    iniset $NOVA_CONF DEFAULT keystone_ec2_url $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens
     iniset_rpc_backend nova $NOVA_CONF
     iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
 
     iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
-    iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS"
     iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS"
     # don't let the conductor get out of control now that we're using a pure python db driver
     iniset $NOVA_CONF conductor workers "$API_WORKERS"
@@ -632,14 +628,11 @@
         iniset $NOVA_CONF DEFAULT enabled_ssl_apis "$NOVA_ENABLED_APIS"
     fi
 
-    if is_service_enabled tls-proxy; then
-        iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT
-    fi
-
     if is_service_enabled n-sproxy; then
         iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
         iniset $NOVA_CONF serial_console enabled True
     fi
+    iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 }
 
 function init_nova_cells {
@@ -784,13 +777,14 @@
     export PATH=$NOVA_BIN_DIR:$PATH
 
     # If the site is not enabled then we are in a grenade scenario
-    local enabled_site_file=$(apache_site_config_for nova-api)
+    local enabled_site_file
+    enabled_site_file=$(apache_site_config_for nova-api)
     if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
         enable_apache_site nova-api
-        enable_apache_site nova-ec2-api
+        enable_apache_site nova-metadata
         restart_apache_server
         tail_log nova-api /var/log/$APACHE_NAME/nova-api.log
-        tail_log nova-ec2-api /var/log/$APACHE_NAME/nova-ec2-api.log
+        tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log
     else
         run_process n-api "$NOVA_BIN_DIR/nova-api"
     fi
@@ -905,7 +899,7 @@
 function stop_nova_rest {
     if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then
         disable_apache_site nova-api
-        disable_apache_site nova-ec2-api
+        disable_apache_site nova-metadata
         restart_apache_server
     else
         stop_process n-api
@@ -926,7 +920,7 @@
 
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LIB_NOVA
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 5525cfd..dae55c6 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -8,7 +8,7 @@
 # ``STACK_USER`` has to be defined
 
 # Save trace setting
-LV_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NOVA_FN_LIBVIRT=$(set +o | grep xtrace)
 set +o xtrace
 
 # Defaults
@@ -23,7 +23,7 @@
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
     if is_ubuntu; then
-        if is_arch "aarch64" && [[ ${DISTRO} =~ (trusty|utopic) ]]; then
+        if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then
             install_package qemu-system
         else
             install_package qemu-kvm
@@ -31,6 +31,11 @@
         fi
         install_package libvirt-bin libvirt-dev
         pip_install_gr libvirt-python
+        if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then
+            # Workaround for bug #1501558. We can remove this once we
+            # get to a version of Ubuntu that has new enough libvirt.
+            TOP_DIR=$TOP_DIR $TOP_DIR/tools/install_ebtables_workaround.sh
+        fi
         #pip_install_gr <there-is-no-guestfs-in-pypi>
     elif is_fedora || is_suse; then
         install_package kvm
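
The ebtables workaround above is opt-in; assuming the usual local.conf meta-section, enabling it looks like:

[[local|localrc]]
EBTABLES_RACE_FIX=True
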
@@ -129,7 +134,7 @@
 
 
 # Restore xtrace
-$LV_XTRACE
+$_XTRACE_NOVA_FN_LIBVIRT
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index 3180d91..2434dce 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -17,7 +17,7 @@
 # cleanup_nova_hypervisor - remove transient data and cache
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_VIRTFAKE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -72,7 +72,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_VIRTFAKE
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index b9e286d..c6ed85d 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -17,7 +17,7 @@
 # cleanup_nova_hypervisor - remove transient data and cache
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_HYP_IRONIC=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/nova_plugins/functions-libvirt
@@ -81,7 +81,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_HYP_IRONIC
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index c54a716..8bbaa21 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -17,7 +17,7 @@
 # cleanup_nova_hypervisor - remove transient data and cache
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NOVA_LIBVIRT=$(set +o | grep xtrace)
 set +o xtrace
 
 source $TOP_DIR/lib/nova_plugins/functions-libvirt
@@ -105,7 +105,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_NOVA_LIBVIRT
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz
index cce36b8..58ab5c1 100644
--- a/lib/nova_plugins/hypervisor-openvz
+++ b/lib/nova_plugins/hypervisor-openvz
@@ -17,7 +17,7 @@
 # cleanup_nova_hypervisor - remove transient data and cache
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_OPENVZ=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -62,7 +62,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_OPENVZ
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere
index 698f836..7c08bc9 100644
--- a/lib/nova_plugins/hypervisor-vsphere
+++ b/lib/nova_plugins/hypervisor-vsphere
@@ -17,7 +17,7 @@
 # cleanup_nova_hypervisor - remove transient data and cache
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_NOVA_VSPHERE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -64,7 +64,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_NOVA_VSPHERE
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index efce383..3eb9149 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -17,7 +17,7 @@
 # cleanup_nova_hypervisor - remove transient data and cache
 
 # Save trace setting
-MY_XTRACE=$(set +o | grep xtrace)
+_XTRACE_XENSERVER=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -79,7 +79,7 @@
 
     # Create a cron job that will rotate guest logs
     $ssh_dom0 crontab - << CRONTAB
-* * * * * /root/rotate_xen_guest_logs.sh
+* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1
 CRONTAB
 
     # Create directories for kernels and images
@@ -111,7 +111,7 @@
 
 
 # Restore xtrace
-$MY_XTRACE
+$_XTRACE_XENSERVER
 
 # Local variables:
 # mode: shell-script
diff --git a/lib/oslo b/lib/oslo
index f64f327..3d6fbb3 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -16,7 +16,7 @@
 # - install_oslo
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LIB_OSLO=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -36,6 +36,7 @@
 GITDIR["oslo.messaging"]=$DEST/oslo.messaging
 GITDIR["oslo.middleware"]=$DEST/oslo.middleware
 GITDIR["oslo.policy"]=$DEST/oslo.policy
+GITDIR["oslo.privsep"]=$DEST/oslo.privsep
 GITDIR["oslo.reports"]=$DEST/oslo.reports
 GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap
 GITDIR["oslo.serialization"]=$DEST/oslo.serialization
@@ -79,6 +80,7 @@
     _do_install_oslo_lib "oslo.messaging"
     _do_install_oslo_lib "oslo.middleware"
     _do_install_oslo_lib "oslo.policy"
+    _do_install_oslo_lib "oslo.privsep"
     _do_install_oslo_lib "oslo.reports"
     _do_install_oslo_lib "oslo.rootwrap"
     _do_install_oslo_lib "oslo.serialization"
@@ -93,7 +95,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LIB_OSLO
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 2e1c897..a44287c 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -21,7 +21,7 @@
 # of this file which is a standard interface.
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_RPC_BACKEND=$(set +o | grep xtrace)
 set +o xtrace
 
 # Functions
@@ -61,7 +61,7 @@
         # NOTE(bnemec): Retry initial rabbitmq configuration to deal with
         # the fact that sometimes it fails to start properly.
         # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1144100
-        # NOTE(tonyb): Extend the orginal retry logic to only restart rabbitmq
+        # NOTE(tonyb): Extend the original retry logic to only restart rabbitmq
         # every second time around the loop.
         # See: https://bugs.launchpad.net/devstack/+bug/1449056 for details on
 # why this is needed.  This can be seen on vivid and Debian unstable
@@ -109,7 +109,7 @@
     fi
 }
 
-# iniset cofiguration
+# iniset configuration
 function iniset_rpc_backend {
     local package=$1
     local file=$2
@@ -144,7 +144,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_RPC_BACKEND
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
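
iniset_rpc_backend above is built on devstack's iniset helper, which writes one option into one INI section, creating the file and section as needed. A hedged usage sketch (the file path is illustrative):

# iniset <file> <section> <option> <value>, after sourcing devstack's
# ini helpers:
iniset /tmp/demo.conf DEFAULT debug True
# /tmp/demo.conf now contains:
#   [DEFAULT]
#   debug = True
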
diff --git a/lib/stack b/lib/stack
index 47e8ce2..f09ddce 100644
--- a/lib/stack
+++ b/lib/stack
@@ -14,11 +14,12 @@
 # Functions
 # ---------
 
-# Generic service install handles venv creation if confgured for service
+# Generic service install handles venv creation if configured for service
 # stack_install_service service
 function stack_install_service {
     local service=$1
     if type install_${service} >/dev/null 2>&1; then
+        # FIXME(dhellmann): Needs to be python3-aware at some point.
         if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then
             rm -rf ${PROJECT_VENV[$service]}
             source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]} ${ADDITIONAL_VENV_PACKAGES//,/ }
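
stack_install_service dispatches on a per-service install_<name> function. A minimal sketch of that type-based dispatch with a made-up service:

#!/bin/bash
# Sketch of the dispatch used above: call the installer only if a
# function with the expected name exists ("foo" is made up).
service=foo
function install_foo {
    echo "installing $FUNCNAME"
}
if type install_${service} >/dev/null 2>&1; then
    install_${service}
fi
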
diff --git a/lib/swift b/lib/swift
index 645bfd7..b596142 100644
--- a/lib/swift
+++ b/lib/swift
@@ -24,7 +24,7 @@
 # - _cleanup_swift_apache_wsgi
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_LIB_SWIFT=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -44,6 +44,7 @@
 SWIFT3_DIR=$DEST/swift3
 
 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
 SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081}
 SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
 SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS}
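
With the proxy port factored into SWIFT_DEFAULT_BIND_PORT rather than hard-coded as 8080, it can be overridden in one place; a hypothetical local.conf fragment:

[[local|localrc]]
# Hypothetical override; the S3 default, keystone endpoints, and
# container-sync realm URL below all follow automatically.
SWIFT_DEFAULT_BIND_PORT=8888
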
@@ -62,7 +63,7 @@
 if is_service_enabled s-proxy && is_service_enabled swift3; then
     # If we are using ``swift3``, we can default the S3 port to swift instead
     # of nova-objectstore
-    S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080}
+    S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT}
 fi
 
 if is_service_enabled g-api; then
@@ -122,17 +123,17 @@
 # trace through the logs when looking for its use.
 SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12}
 
-# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximun length of headers in
+# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximum length of headers in
 # Swift API
 SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384}
 
 # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE``
-# Port bases used in port number calclution for the service "nodes"
-# The specified port number will be used, the additinal ports calculated by
+# Port bases used in port number calculation for the service "nodes"
+# The specified port number will be used, the additional ports calculated by
 # base_port + node_num * 10
-OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013}
-CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011}
-ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012}
+OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613}
+CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611}
+ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6612}
 
 # Enable tempurl feature
 SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False}
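
Per the comment above, each storage node's ports are derived from the base; worked out for a hypothetical SWIFT_REPLICAS=3 setup with the new 66xx defaults, matching the arithmetic used in _config_swift_apache_wsgi below:

#!/bin/bash
OBJECT_PORT_BASE=6613
CONTAINER_PORT_BASE=6611
ACCOUNT_PORT_BASE=6612
for node_number in 1 2 3; do
    echo "node ${node_number}:" \
        "object=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))" \
        "container=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))" \
        "account=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))"
done
# node 1: object=6613 container=6611 account=6612
# node 2: object=6623 container=6621 account=6622
# node 3: object=6633 container=6631 account=6632
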
@@ -183,7 +184,7 @@
 # _config_swift_apache_wsgi() - Set WSGI config files of Swift
 function _config_swift_apache_wsgi {
     sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR}
-    local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+    local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
 
     # copy proxy vhost and wsgi file
     sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template $(apache_site_config_for proxy-server)
@@ -205,9 +206,12 @@
     # copy apache vhost file and set name and port
     local node_number
     for node_number in ${SWIFT_REPLICAS_SEQ}; do
-        local object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
-        local container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
-        local account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
+        local object_port
+        object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))
+        local container_port
+        container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))
+        local account_port
+        account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))
 
         sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number})
         sudo sed -e "
@@ -344,7 +348,7 @@
     local csyncfile=${SWIFT_CONF_DIR}/container-sync-realms.conf
     cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${csyncfile}
     iniset ${csyncfile} realm1 key realm1key
-    iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/"
+    iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/"
 
     iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user
     iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER}
@@ -365,7 +369,7 @@
     if is_service_enabled tls-proxy; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT}
     else
-        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080}
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT}
     fi
 
     if is_ssl_enabled_service s-proxy; then
@@ -504,7 +508,8 @@
 
     if is_service_enabled keystone; then
         iniuncomment ${testfile} func_test auth_version
-        local auth_vers=$(iniget ${testfile} func_test auth_version)
+        local auth_vers
+        auth_vers=$(iniget ${testfile} func_test auth_version)
         iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST}
         iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT}
         if [[ $auth_vers == "3" ]]; then
@@ -514,7 +519,8 @@
         fi
     fi
 
-    local user_group=$(id -g ${STACK_USER})
+    local user_group
+    user_group=$(id -g ${STACK_USER})
     sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}
 
     local swift_log_dir=${SWIFT_DATA_DIR}/logs
@@ -540,7 +546,8 @@
     # First do a bit of setup by creating the directories and
     # changing the permissions so we can run it as our user.
 
-    local user_group=$(id -g ${STACK_USER})
+    local user_group
+    user_group=$(id -g ${STACK_USER})
     sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs}
 
     # Create a loopback disk and format it to XFS.
@@ -607,7 +614,8 @@
 
     KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql}
 
-    local another_role=$(get_or_create_role "anotherrole")
+    local another_role
+    another_role=$(get_or_create_role "anotherrole")
 
     # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses
     # temp urls, which break when uploaded by a non-admin role
@@ -618,38 +626,45 @@
         get_or_create_service "swift" "object-store" "Swift Service"
         get_or_create_endpoint "object-store" \
             "$REGION_NAME" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \
-            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s"
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" \
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \
+            "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s"
     fi
 
-    local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
+    local swift_tenant_test1
+    swift_tenant_test1=$(get_or_create_project swifttenanttest1 default)
     die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1"
     SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \
                         "default" "test@example.com")
     die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1"
     get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1
 
-    local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
+    local swift_user_test3
+    swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \
                                 "default" "test3@example.com")
     die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3"
     get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1
 
-    local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
+    local swift_tenant_test2
+    swift_tenant_test2=$(get_or_create_project swifttenanttest2 default)
     die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2"
 
-    local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
+    local swift_user_test2
+    swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \
                                 "default" "test2@example.com")
     die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2"
     get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2
 
-    local swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
+    local swift_domain
+    swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing')
     die_if_not_set $LINENO swift_domain "Failure creating swift_test domain"
 
-    local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
+    local swift_tenant_test4
+    swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain)
     die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4"
 
-    local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
+    local swift_user_test4
+    swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \
                                 $swift_domain "test4@example.com")
     die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4"
     get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4
@@ -761,7 +776,7 @@
         swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true
     done
     if is_service_enabled tls-proxy; then
-        local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080}
+        local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
         start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT &
     fi
     run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
@@ -798,15 +813,17 @@
 }
 
 function swift_configure_tempurls {
+    # NOTE: this uses the swift service credentials, not admin!
     OS_USERNAME=swift \
-        OS_TENANT_NAME=$SERVICE_TENANT_NAME \
-        OS_PASSWORD=$SERVICE_PASSWORD \
-        OS_AUTH_URL=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \
-        swift post -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY"
+    OS_PASSWORD=$SERVICE_PASSWORD \
+    OS_PROJECT_NAME=$SERVICE_TENANT_NAME \
+    OS_AUTH_URL=$SERVICE_ENDPOINT \
+    openstack object store account \
+        set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY"
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_LIB_SWIFT
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
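
Once the reworked swift_configure_tempurls has run, the stored key can be checked with the same client; a hedged verification snippet reusing the swift service credentials it relies on:

OS_USERNAME=swift \
OS_PASSWORD=$SERVICE_PASSWORD \
OS_PROJECT_NAME=$SERVICE_TENANT_NAME \
OS_AUTH_URL=$SERVICE_ENDPOINT \
openstack object store account show
# The Temp-URL-Key property should appear in the account output.
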
diff --git a/lib/tempest b/lib/tempest
index f4d0a6d..8431229 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,7 +15,6 @@
 #   - ``SERVICE_HOST``
 #   - ``BASE_SQL_CONN`` ``lib/database`` declares
 #   - ``PUBLIC_NETWORK_NAME``
-#   - ``Q_USE_NAMESPACE``
 #   - ``Q_ROUTER_NAME``
 #   - ``Q_L3_ENABLED``
 #   - ``VIRT_DRIVER``
@@ -24,7 +23,7 @@
 #
 # Optional Dependencies:
 #
-# - ``ALT_*`` (similar vars exists in keystone_data.sh)
+# - ``ALT_*``
 # - ``LIVE_MIGRATION_AVAILABLE``
 # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION``
 # - ``DEFAULT_INSTANCE_TYPE``
@@ -36,10 +35,9 @@
 #
 # - install_tempest
 # - configure_tempest
-# - init_tempest
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_TEMPEST=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -68,9 +66,6 @@
 # have tempest installed in DevStack by default.
 INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"}
 
-BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}"
-BOTO_CONF=/etc/boto.cfg
-
 # Cinder/Volume variables
 TEMPEST_VOLUME_DRIVER=${TEMPEST_VOLUME_DRIVER:-default}
 TEMPEST_DEFAULT_VOLUME_VENDOR="Open Source"
@@ -119,10 +114,6 @@
         pip_install_gr testrepository
     fi
 
-    # Used during configuration so make sure we have the correct
-    # version installed
-    pip_install_gr python-openstackclient
-
     local image_lines
     local images
     local num_images
@@ -136,8 +127,6 @@
     local flavor_lines
     local public_network_id
     local public_router_id
-    local tenant_networks_reachable
-    local boto_instance_type="m1.tiny"
     local ssh_connect_method="fixed"
 
     # Save IFS
@@ -202,38 +191,26 @@
     if is_service_enabled nova; then
         # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior
         # Tempest creates its own instance types
+        available_flavors=$(nova flavor-list)
         if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
-            available_flavors=$(nova flavor-list)
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
-                if is_arch "ppc64"; then
-                    # Qemu needs at least 128MB of memory to boot on ppc64
-                    nova flavor-create m1.nano 42 128 0 1
-                else
-                    nova flavor-create m1.nano 42 64 0 1
-                fi
+                nova flavor-create m1.nano 42 64 0 1
             fi
             flavor_ref=42
-            boto_instance_type=m1.nano
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
-                if is_arch "ppc64"; then
-                    nova flavor-create m1.micro 84 256 0 1
-                else
-                    nova flavor-create m1.micro 84 128 0 1
-                fi
+                nova flavor-create m1.micro 84 128 0 1
             fi
             flavor_ref_alt=84
         else
             # Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it.
-            boto_instance_type=$DEFAULT_INSTANCE_TYPE
-            flavor_lines=`nova flavor-list`
             IFS=$'\r\n'
             flavors=""
-            for line in $flavor_lines; do
+            for line in $available_flavors; do
                 f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }")
                 flavors="$flavors $f"
             done
 
-            for line in $flavor_lines; do
+            for line in $available_flavors; do
                 flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`"
             done
 
@@ -259,13 +236,8 @@
         fi
     fi
 
-    if [ "$Q_USE_NAMESPACE" != "False" ]; then
-        tenant_networks_reachable=false
-        if ! is_service_enabled n-net; then
-            ssh_connect_method="floating"
-        fi
-    else
-        tenant_networks_reachable=true
+    if ! is_service_enabled n-net; then
+        ssh_connect_method="floating"
     fi
 
     ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
@@ -273,21 +245,6 @@
     if [ "$Q_L3_ENABLED" = "True" ]; then
         public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
             awk '{print $2}')
-        if [ "$Q_USE_NAMESPACE" == "False" ]; then
-            # If namespaces are disabled, DevStack will create a single
-            # public router that tempest should be configured to use.
-            public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \
-                { print \$2 }")
-        fi
-    fi
-
-    EC2_URL=$(get_endpoint_url ec2 public || true)
-    if [[ -z $EC2_URL ]]; then
-        EC2_URL="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/"
-    fi
-    S3_URL=$(get_endpoint_url s3 public || true)
-    if [[ -z $S3_URL ]]; then
-        S3_URL="http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}"
     fi
 
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -302,24 +259,16 @@
     # Timeouts
     iniset $TEMPEST_CONFIG compute build_timeout $BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT
-    iniset $TEMPEST_CONFIG boto build_timeout $BUILD_TIMEOUT
-    iniset $TEMPEST_CONFIG boto http_socket_timeout 5
 
     # Identity
     iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:5000/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3"
-    iniset $TEMPEST_CONFIG identity username $TEMPEST_USERNAME
-    iniset $TEMPEST_CONFIG identity password "$password"
-    iniset $TEMPEST_CONFIG identity tenant_name $TEMPEST_TENANT_NAME
-    iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME
-    iniset $TEMPEST_CONFIG identity alt_password "$password"
-    iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
-        iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME
-        iniset $TEMPEST_CONFIG identity admin_password "$password"
-        iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME
-        iniset $TEMPEST_CONFIG identity admin_tenant_id $ADMIN_TENANT_ID
-        iniset $TEMPEST_CONFIG identity admin_domain_name $ADMIN_DOMAIN_NAME
+        iniset $TEMPEST_CONFIG auth admin_username $ADMIN_USERNAME
+        iniset $TEMPEST_CONFIG auth admin_password "$password"
+        iniset $TEMPEST_CONFIG auth admin_tenant_name $ADMIN_TENANT_NAME
+        iniset $TEMPEST_CONFIG auth admin_tenant_id $ADMIN_TENANT_ID
+        iniset $TEMPEST_CONFIG auth admin_domain_name $ADMIN_DOMAIN_NAME
     fi
     if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
         # Only Identity v3 is available; then skip Identity API v2 tests
@@ -340,23 +289,21 @@
     if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then
         iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE
     fi
-    iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image true
 
     # Image Features
     iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
 
     # Compute
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
-    iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME
-    iniset $TEMPEST_CONFIG compute ip_version_for_ssh 4
-    iniset $TEMPEST_CONFIG compute ssh_timeout $BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG compute image_ref $image_uuid
-    iniset $TEMPEST_CONFIG compute image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt
     iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${ALT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
     iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
+    # Set the equivalent validation option here as well to ensure the
+    # two stay in sync; they shouldn't be separate options.
+    iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
     if [[ ! $(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then
         iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
     fi
@@ -372,7 +319,8 @@
     # Compute Features
     # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
     # NOTE(mtreinish): This must be done after auth settings are added to the tempest config
-    local tmp_cfg_file=$(mktemp)
+    local tmp_cfg_file
+    tmp_cfg_file=$(mktemp)
     cd $TEMPEST_DIR
     tox -revenv -- verify-tempest-config -uro $tmp_cfg_file
 
@@ -384,6 +332,30 @@
         compute_api_extensions=$(remove_disabled_extensions $compute_api_extensions $DISABLE_COMPUTE_API_EXTENSIONS)
     fi
 
+    # Set the microversion range for compute tests.
+    # This is used to run the Nova microversion tests; the default
+    # range of [None, latest] allows Tempest to run all of them.
+    # NOTE: To avoid microversion test failures on stable branches,
+    #       "tempest_compute_max_microversion" must be changed on each
+    #       release from "latest" to the maximum version supported by
+    #       that release.
+    local tempest_compute_min_microversion=${TEMPEST_COMPUTE_MIN_MICROVERSION:-None}
+    local tempest_compute_max_microversion=${TEMPEST_COMPUTE_MAX_MICROVERSION:-"latest"}
+    # Reset the microversions to None when the legacy v2.0 API is in
+    # use, since it does not support microversions; "None" for both
+    # ends disables microversion testing.
+    if [[ "$TEMPEST_COMPUTE_TYPE" == "compute_legacy" ]]; then
+        tempest_compute_min_microversion=None
+        tempest_compute_max_microversion=None
+    fi
+    if [ "$tempest_compute_min_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG compute-feature-enabled min_microversion
+    else
+        iniset $TEMPEST_CONFIG compute-feature-enabled min_microversion $tempest_compute_min_microversion
+    fi
+    if [ "$tempest_compute_max_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG compute-feature-enabled max_microversion
+    else
+        iniset $TEMPEST_CONFIG compute-feature-enabled max_microversion $tempest_compute_max_microversion
+    fi
+
     iniset $TEMPEST_CONFIG compute-feature-enabled resize True
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
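
Per the NOTE above, a stable branch would pin the maximum rather than track "latest"; a hypothetical local.conf override (2.3 is a placeholder for that release's maximum supported microversion):

[[local|localrc]]
TEMPEST_COMPUTE_MAX_MICROVERSION=2.3
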
@@ -401,11 +373,22 @@
     if is_service_enabled n-cell; then
         # Cells doesn't support shelving/unshelving
         iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+        # Cells doesn't support hot-plugging virtual interfaces.
+        iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
+
+        if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
+            # Cells supports resize but does not currently work with devstack
+            # because of the custom flavors created for Tempest runs which are
+            # not in the cells database.
+            # TODO(mriedem): work on adding a nova-manage command to sync
+            # flavors into the cells database.
+            iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+        fi
     fi
 
     # Network
     iniset $TEMPEST_CONFIG network api_version 2.0
-    iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable"
+    iniset $TEMPEST_CONFIG network tenant_networks_reachable false
     iniset $TEMPEST_CONFIG network public_network_id "$public_network_id"
     iniset $TEMPEST_CONFIG network public_router_id "$public_router_id"
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
@@ -421,16 +404,6 @@
     fi
     iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions
 
-    # boto
-    iniset $TEMPEST_CONFIG boto ec2_url "$EC2_URL"
-    iniset $TEMPEST_CONFIG boto s3_url "$S3_URL"
-    iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH"
-    iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd.manifest.xml
-    iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img.manifest.xml
-    iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz.manifest.xml
-    iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type"
-    iniset $TEMPEST_CONFIG boto http_socket_timeout 30
-
     # Orchestration Tests
     if is_service_enabled heat; then
         if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
@@ -472,12 +445,18 @@
 
     # Validation
     iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False}
+    iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
+    iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
+    iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
+    iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME
 
     # Volume
     # TODO(dkranz): Remove the bootable flag when Juno is end of life.
     iniset $TEMPEST_CONFIG volume-feature-enabled bootable True
     # TODO(jordanP): Remove the extend_with_snapshot flag when Juno is end of life.
     iniset $TEMPEST_CONFIG volume-feature-enabled extend_with_snapshot True
+    # TODO(obutenko): Remove the incremental_backup_force flag when Kilo and Juno is end of life.
+    iniset $TEMPEST_CONFIG volume-feature-enabled incremental_backup_force True
 
     local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then
@@ -515,7 +494,6 @@
 
     # Dashboard
     iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/"
-    iniset $TEMPEST_CONFIG dashboard login_url "http://$SERVICE_HOST/auth/login/"
 
     # CLI
     iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR
@@ -523,7 +501,7 @@
     # Baremetal
     if [ "$VIRT_DRIVER" = "ironic" ] ; then
         iniset $TEMPEST_CONFIG baremetal driver_enabled True
-        iniset $TEMPEST_CONFIG baremetal unprovision_timeout 300
+        iniset $TEMPEST_CONFIG baremetal deploy_timeout $BUILD_TIMEOUT
         iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES
         iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID
         iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
@@ -542,6 +520,8 @@
     if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
         iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
         iniset $TEMPEST_CONFIG compute-feature-enabled resize False
+        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
+        iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
         iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
     fi
 
@@ -559,10 +539,10 @@
         fi
     done
 
-    if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
-        # Use the ``BOTO_CONFIG`` environment variable to point to this file
-        iniset -sudo $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE
-        sudo chown $STACK_USER $BOTO_CONF
+    if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
+        # libvirt-lxc does not support boot from volume or attaching volumes
+        # so basically anything with cinder is out of the question.
+        iniset $TEMPEST_CONFIG service_available cinder "False"
     fi
 
     # Auth
@@ -573,11 +553,15 @@
         else
             tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY etc/accounts.yaml
         fi
-        iniset $TEMPEST_CONFIG auth allow_tenant_isolation False
+        iniset $TEMPEST_CONFIG auth use_dynamic_credentials False
         iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
+    elif [[ $TEMPEST_HAS_ADMIN == "False" ]]; then
+        iniset $TEMPEST_CONFIG auth use_dynamic_credentials ${TEMPEST_ALLOW_TENANT_ISOLATION:-False}
+
     else
-        iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
+        iniset $TEMPEST_CONFIG auth use_dynamic_credentials ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
     fi
+
     # Restore IFS
     IFS=$ifs
 }
@@ -622,37 +606,8 @@
     popd
 }
 
-# init_tempest() - Initialize EC2 images
-function init_tempest {
-    local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH}
-    # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec
-    local image_dir="$FILES/images/${base_image_name}-uec"
-    local kernel="$image_dir/${base_image_name}-vmlinuz"
-    local ramdisk="$image_dir/${base_image_name}-initrd"
-    local disk_image="$image_dir/${base_image_name}-blank.img"
-    if is_service_enabled nova; then
-        # If the CirrOS uec downloaded and the system is UEC capable
-        if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a  "$VIRT_DRIVER" != "openvz" \
-            -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then
-            echo "Prepare aki/ari/ami Images"
-            mkdir -p $BOTO_MATERIALS_PATH
-            ( #new namespace
-                # euca2ools should be installed to call euca-* commands
-                is_package_installed euca2ools || install_package euca2ools
-                # tenant:demo ; user: demo
-                source $TOP_DIR/accrc/demo/demo
-                euca-bundle-image -r ${CIRROS_ARCH} -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH"
-                euca-bundle-image -r ${CIRROS_ARCH} -i "$ramdisk" --ramdisk true -d "$BOTO_MATERIALS_PATH"
-                euca-bundle-image -r ${CIRROS_ARCH} -i "$disk_image" -d "$BOTO_MATERIALS_PATH"
-            ) 2>&1 </dev/null | cat
-        else
-            echo "Boto materials are not prepared"
-        fi
-    fi
-}
-
 # Restore xtrace
-$XTRACE
+$_XTRACE_TEMPEST
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/template b/lib/template
index 2703788..08d10bb 100644
--- a/lib/template
+++ b/lib/template
@@ -21,7 +21,7 @@
 # - cleanup_XXXX
 
 # Save trace setting
-XTRACE=$(set +o | grep xtrace)
+_XTRACE_TEMPLATE=$(set +o | grep xtrace)
 set +o xtrace
 
 
@@ -92,7 +92,7 @@
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_TEMPLATE
 
 # Tell emacs to use shell-script-mode
 ## Local variables:
diff --git a/lib/tls b/lib/tls
index 8ff2027..f4740b8 100644
--- a/lib/tls
+++ b/lib/tls
@@ -346,7 +346,8 @@
 # we need to change it.
 function fix_system_ca_bundle_path {
     if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
-        local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
+        local capath
+        capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
 
         if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
             if is_fedora; then
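
The capath probe above packs a multi-line Python 2 snippet into a single shell word using $'...' ANSI-C quoting, in which \n expands to a real newline. A standalone sketch (assumes a python2 interpreter, as elsewhere in this tree):

#!/bin/bash
capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass')
echo "requests CA bundle: ${capath:-<requests not installed>}"
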
diff --git a/openrc b/openrc
index 71ba5a6..9bc0fd7 100644
--- a/openrc
+++ b/openrc
@@ -95,12 +95,6 @@
     fi
 fi
 
-# Currently novaclient needs you to specify the *compute api* version.  This
-# needs to match the config of your catalog returned by Keystone.
-export NOVA_VERSION=${NOVA_VERSION:-1.1}
-# In the future this will change names:
-export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION}
-
 # Currently cinderclient needs you to specify the *volume api* version. This
 # needs to match the config of your catalog returned by Keystone.
 export CINDER_VERSION=${CINDER_VERSION:-2}
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 14d13cf..9c4f6f7 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -88,11 +88,7 @@
         sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
         sudo update-rc.d elasticsearch defaults 95 10
     elif is_fedora; then
-        if [[ "$os_RELEASE" -ge "21" ]]; then
-            is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
-        else
-            is_package_installed java-1.7.0-openjdk-headless || install_package java-1.7.0-openjdk-headless
-        fi
+        is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
         yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
         sudo /bin/systemctl daemon-reload
         sudo /bin/systemctl enable elasticsearch.service
diff --git a/samples/local.conf b/samples/local.conf
index ce70073..34c9e8b 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -28,7 +28,7 @@
 # and they will be added to ``local.conf``.
 SERVICE_TOKEN=azertytoken
 ADMIN_PASSWORD=nomoresecrete
-MYSQL_PASSWORD=stackdb
+DATABASE_PASSWORD=stackdb
 RABBIT_PASSWORD=stackqueue
 SERVICE_PASSWORD=$ADMIN_PASSWORD
 
@@ -63,7 +63,8 @@
 # Using milestone-proposed branches
 # ---------------------------------
 
-# Uncomment these to grab the milestone-proposed branches from the repos:
+# Uncomment these to grab the milestone-proposed branches from the
+# repos:
 #CINDER_BRANCH=milestone-proposed
 #GLANCE_BRANCH=milestone-proposed
 #HORIZON_BRANCH=milestone-proposed
@@ -74,6 +75,13 @@
 #NEUTRON_BRANCH=milestone-proposed
 #SWIFT_BRANCH=milestone-proposed
 
+# Using git versions of clients
+# -----------------------------
+# By default clients are installed from pip.  See LIBS_FROM_GIT in
+# stackrc for details on getting clients from specific branches or
+# revisions.  e.g.
+# LIBS_FROM_GIT="python-ironicclient"
+# IRONICCLIENT_BRANCH=refs/changes/44/2.../1
 
 # Swift
 # -----
@@ -93,9 +101,3 @@
 # moved by setting ``SWIFT_DATA_DIR``. The directory will be created
 # if it does not exist.
 SWIFT_DATA_DIR=$DEST/data
-
-# Tempest
-# -------
-
-# Install the tempest test suite
-enable_service tempest
diff --git a/stack.sh b/stack.sh
index c2eeaee..b5dc51c 100755
--- a/stack.sh
+++ b/stack.sh
@@ -75,6 +75,7 @@
 
 # Check if run in POSIX shell
 if [[ "${POSIXLY_CORRECT}" == "y" ]]; then
+    set +o xtrace
     echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer."
     exit 1
 fi
@@ -85,14 +86,39 @@
 # action to create a suitable user account.
 
 if [[ $EUID -eq 0 ]]; then
-    echo "You are running this script as root."
-    echo "Cut it out."
-    echo "Really."
-    echo "If you need an account to run DevStack, do this (as root, heh) to create a non-root account:"
-    echo "$TOP_DIR/tools/create-stack-user.sh"
+    set +o xtrace
+    echo "DevStack should be run as a user with sudo permissions, "
+    echo "not root."
+    echo "A \"stack\" user configured correctly can be created with:"
+    echo " $TOP_DIR/tools/create-stack-user.sh"
     exit 1
 fi
 
+# OpenStack is designed to run at a system level, with system level
+# installation of python packages. It does not support running under a
+# virtual env, and will fail in really odd ways if you do this. Make
+# this explicit as it has come up on the mailing list.
+if [[ -n "$VIRTUAL_ENV" ]]; then
+    set +o xtrace
+    echo "You appear to be running under a python virtualenv."
+    echo "DevStack does not support this, as we may break the"
+    echo "virtualenv you are currently in by modifying "
+    echo "external system-level components the virtualenv relies on."
+    echo "We recommend you use a separate virtual-machine if "
+    echo "you are worried about DevStack taking over your system."
+    exit 1
+fi
+
+# Provide a safety switch for devstack. If you run devstack on a lot
+# of different environments, you will sometimes run it on the wrong
+# box; this provides a way to prevent that.
+if [[ -e $HOME/.no-devstack ]]; then
+    set +o xtrace
+    echo "You've marked this host as a no-devstack host, to save yourself from"
+    echo "running devstack accidentally. If this is in error, please remove the"
+    echo "~/.no-devstack file"
+    exit 1
+fi
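
Opting a host into the new safety switch is a one-liner:

# Mark this host so stack.sh refuses to run on it ...
touch ~/.no-devstack
# ... and unmark it if that was a mistake.
rm ~/.no-devstack
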
 
 # Prepare the environment
 # -----------------------
@@ -169,7 +195,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f22|f23|rhel7) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -240,9 +266,7 @@
 # Some distros need to add repos beyond the defaults provided by the vendor
 # to pick up required packages.
 
-if is_fedora && [[ $DISTRO == "rhel7" ]]; then
-    # RHEL requires EPEL for many Open Stack dependencies
-
+function _install_epel_and_rdo {
     # NOTE: We always remove and install latest -- some environments
     # use snapshot images, and if EPEL version updates they break
     # unless we update them to latest version.
@@ -272,25 +296,27 @@
     sudo yum-config-manager --enable epel-bootstrap
     yum_install epel-release || \
         die $LINENO "Error installing EPEL repo, cannot continue"
-    # EPEL rpm has installed it's version
     sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo
 
     # ... and also optional to be enabled
     sudo yum-config-manager --enable rhel-7-server-optional-rpms
 
-    RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm"}
-    RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-kilo"}
-
-    if ! sudo yum repolist enabled $RHEL_RDO_REPO_ID | grep -q $RHEL_RDO_REPO_ID; then
-        echo "RDO repo not detected; installing"
-        yum_install $RHEL_RDO_REPO_RPM || \
-            die $LINENO "Error installing RDO repo, cannot continue"
-    fi
+    # install the latest RDO release rpm
+    sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm
 
     if is_oraclelinux; then
         sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
     fi
+}
 
+# If you already have all of the repos above set up (e.g. a CI
+# situation where they are baked into your image) you may choose to
+# skip this to speed things up
+SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL)
+
+if is_fedora && [[ $DISTRO == "rhel7" ]] && \
+        [[ ${SKIP_EPEL_INSTALL} != True ]]; then
+    _install_epel_and_rdo
 fi
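
For images that already carry EPEL and RDO, the new switch skips the whole step:

# CI images with the repos baked in can skip the setup above:
SKIP_EPEL_INSTALL=True ./stack.sh
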
 
 
@@ -306,9 +332,6 @@
 safe_chown -R $STACK_USER $DEST
 safe_chmod 0755 $DEST
 
-# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
-check_path_perm_sanity ${DEST}
-
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
 sudo mkdir -p $DATA_DIR
@@ -322,6 +345,10 @@
     sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts
 fi
 
+# Ensure python is installed
+# --------------------------
+is_package_installed python || install_package python
+
 
 # Configure Logging
 # -----------------
@@ -443,6 +470,8 @@
     fi
 fi
 
+# Basic test for ``$DEST`` path permissions (fatal on error unless skipped)
+check_path_perm_sanity ${DEST}
 
 # Configure Error Traps
 # ---------------------
@@ -538,6 +567,7 @@
 source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/dlm
 
 # Extras Source
 # --------------
@@ -545,6 +575,7 @@
 # Phase: source
 run_phase source
 
+
 # Interactive Configuration
 # -------------------------
 
@@ -552,7 +583,8 @@
 
 # Generic helper to configure passwords
 function read_password {
-    XTRACE=$(set +o | grep xtrace)
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
     set +o xtrace
     var=$1; msg=$2
     pw=${!var}
@@ -560,7 +592,7 @@
     if [[ -f $RC_DIR/localrc ]]; then
         localrc=$TOP_DIR/localrc
     else
-        localrc=$TOP_DIR/.localrc.auto
+        localrc=$TOP_DIR/.localrc.password
     fi
 
     # If the password is not defined yet, proceed to prompt user for a password.
@@ -570,13 +602,15 @@
             touch $localrc
         fi
 
-        # Presumably if we got this far it can only be that our localrc is missing
-        # the required password.  Prompt user for a password and write to localrc.
+        # Presumably if we got this far it can only be that our
+        # localrc is missing the required password.  Prompt user for a
+        # password and write to localrc.
+
         echo ''
         echo '################################################################################'
         echo $msg
         echo '################################################################################'
-        echo "This value will be written to your localrc file so you don't have to enter it "
+        echo "This value will be written to ${localrc} file so you don't have to enter it "
         echo "again.  Use only alphanumeric characters."
         echo "If you leave this blank, a random default value will be used."
         pw=" "
@@ -593,7 +627,9 @@
         eval "$var=$pw"
         echo "$var=$pw" >> $localrc
     fi
-    $XTRACE
+
+    # restore previous xtrace value
+    $xtrace
 }
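
read_password depends on bash indirect expansion: ${!var} dereferences the variable whose name is stored in var. A standalone sketch:

#!/bin/bash
ADMIN_PASSWORD=supersecrete
var=ADMIN_PASSWORD
pw=${!var}               # dereference: yields the value of $ADMIN_PASSWORD
echo "$var is set to $pw"
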
 
 
@@ -712,12 +748,6 @@
 # Install required infra support libraries
 install_infra
 
-# Pre-build some problematic wheels
-if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then
-    source $TOP_DIR/tools/build_wheels.sh
-fi
-
-
 # Extras Pre-install
 # ------------------
 # Phase: pre-install
@@ -725,6 +755,10 @@
 
 install_rpc_backend
 
+# NOTE(sdague): dlm install is conditional on a dlm backend being enabled in configuration
+install_dlm
+configure_dlm
+
 if is_service_enabled $DATABASE_BACKENDS; then
     install_database
 fi
@@ -904,8 +938,8 @@
 restart_rpc_backend
 
 
-# Export Certicate Authority Bundle
-# ---------------------------------
+# Export Certificate Authority Bundle
+# -----------------------------------
 
 # If certificates were used and written to the SSL bundle file then these
 # should be exported so clients can validate their connections.
@@ -975,13 +1009,15 @@
         start_keystone
     fi
 
+    export OS_IDENTITY_API_VERSION=3
+
     # Set up a temporary admin URI for Keystone
-    SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v2.0
+    SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v3
 
     if is_service_enabled tls-proxy; then
         export OS_CACERT=$INT_CA_DIR/ca-chain.pem
         # Until the client support is fixed, just use the internal endpoint
-        SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0
+        SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v3
     fi
 
     # Setup OpenStackClient token-endpoint auth
@@ -1005,15 +1041,27 @@
     # Begone token auth
     unset OS_TOKEN OS_URL
 
-    # force set to use v2 identity authentication even with v3 commands
-    export OS_AUTH_TYPE=v2password
+    # Rather than just export these, we write them out to an
+    # intermediate userrc file that can also be used to debug if
+    # something goes wrong between here and running
+    # tools/create_userrc.sh (this script relies on services other
+    # than keystone being available, so we can't call it right now)
+    cat > $TOP_DIR/userrc_early <<EOF
+# Use this for debugging issues before files in accrc are created
 
-    # Set up password auth credentials now that Keystone is bootstrapped
-    export OS_AUTH_URL=$SERVICE_ENDPOINT
-    export OS_TENANT_NAME=admin
-    export OS_USERNAME=admin
-    export OS_PASSWORD=$ADMIN_PASSWORD
-    export OS_REGION_NAME=$REGION_NAME
+# Set up password auth credentials now that Keystone is bootstrapped
+export OS_AUTH_URL=$KEYSTONE_AUTH_URI
+export OS_USERNAME=admin
+export OS_USER_DOMAIN_ID=default
+export OS_PASSWORD=$ADMIN_PASSWORD
+export OS_PROJECT_NAME=admin
+export OS_PROJECT_DOMAIN_ID=default
+export OS_REGION_NAME=$REGION_NAME
+
+EOF
+
+    source $TOP_DIR/userrc_early
+
 fi
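
Because userrc_early is written before most services exist, it is handy for debugging the keystone bootstrap itself. For example (the devstack checkout path is an example):

    # Load the early admin credentials and talk to keystone directly
    source /opt/stack/devstack/userrc_early
    openstack project show admin
    openstack user list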
 
 # Write a clouds.yaml file
@@ -1356,6 +1404,8 @@
     exec 1>&3
 fi
 
+# Dump out the time totals
+time_totals
 
 # Using the cloud
 # ===============
diff --git a/stackrc b/stackrc
index c7c6313..16621f1 100644
--- a/stackrc
+++ b/stackrc
@@ -72,18 +72,6 @@
     ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat
 fi
 
-# SQLAlchemy supports multiple database drivers for each database server
-# type. For example, deployer may use MySQLdb, MySQLConnector, or oursql
-# to access MySQL database.
-#
-# When defined, the variable controls which database driver is used to
-# connect to database server. Otherwise using default driver defined for
-# each database type.
-#
-# You can find the list of currently supported drivers for each database
-# type at: http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html
-# SQLALCHEMY_DATABASE_DRIVER="mysqldb"
-
 # Global toggle for enabling services under mod_wsgi. If this is set to
 # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of
 # deployment, will be deployed under Apache. If this is set to ``False`` all
@@ -91,7 +79,7 @@
 ENABLE_HTTPD_MOD_WSGI_SERVICES=True
 
 # Set the default Nova APIs to enable
-NOVA_ENABLED_APIS=ec2,osapi_compute,metadata
+NOVA_ENABLED_APIS=osapi_compute,metadata
 
 # Set the root URL for Horizon
 HORIZON_APACHE_ROOT="/dashboard"
@@ -101,7 +89,33 @@
 # ctrl-c, up-arrow, enter to restart the service. Starting services
 # this way is slightly unreliable, and a bit slower, so this can
 # be disabled for automated testing by setting this value to False.
-USE_SCREEN=True
+USE_SCREEN=$(trueorfalse True USE_SCREEN)
+
+# When using screen, should we keep a log file on disk?  You might
+# want this False if you have a long-running setup where verbose logs
+# can fill up the host.
+# XXX: Ideally screen itself would be configured to log but just not
+# activate.  This isn't possible with the screenrc syntax.  Temporary
+# logging can still be used by a developer with:
+#    C-a : logfile foo
+#    C-a : log on
+SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING)
+
+# Passwords generated by interactive devstack runs
+if [[ -r $RC_DIR/.localrc.password ]]; then
+    source $RC_DIR/.localrc.password
+fi
+
+# Control whether Python 3 should be used.
+export USE_PYTHON3=${USE_PYTHON3:-False}
+
+# When Python 3 is supported by an application, adding the specific
+# version of Python 3 to this variable will install the app using that
+# version of the interpreter instead of 2.7.
+export PYTHON3_VERSION=${PYTHON3_VERSION:-3.4}
+
+# Just to be more explicit on the Python 2 version to use.
+export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7}
 
 # allow local overrides of env variables, including repo config
 if [[ -f $RC_DIR/localrc ]]; then
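trueorfalse normalizes the many spellings of booleans users put in local.conf; its second argument is a variable *name*, not a value, which is also why a later hunk in this series drops a stray `$`. A quick sketch of its behavior:

    # Inputs are normalized; unset or unrecognized values fall back to
    # the first argument (the default):
    USE_SCREEN=yes;   USE_SCREEN=$(trueorfalse True USE_SCREEN)   # -> True
    USE_SCREEN=0;     USE_SCREEN=$(trueorfalse True USE_SCREEN)   # -> False
    unset USE_SCREEN; USE_SCREEN=$(trueorfalse True USE_SCREEN)   # -> True (default)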
@@ -138,11 +152,6 @@
 # requirements files here, in a comma-separated list
 ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}
 
-# Configure wheel cache location
-export WHEELHOUSE=${WHEELHOUSE:-$DEST/.wheelhouse}
-export PIP_WHEEL_DIR=${PIP_WHEEL_DIR:-$WHEELHOUSE}
-export PIP_FIND_LINKS=${PIP_FIND_LINKS:-file://$WHEELHOUSE}
-
 # This can be used to turn database query logging on and off
 # (currently only implemented for MySQL backend)
 DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
@@ -204,10 +213,6 @@
 HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
 HORIZON_BRANCH=${HORIZON_BRANCH:-master}
 
-# baremetal provisioning service
-IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git}
-IRONIC_BRANCH=${IRONIC_BRANCH:-master}
-
 # unified auth system (manages accounts/tokens)
 KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git}
 KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master}
@@ -258,6 +263,7 @@
 ##############
 #
 #  OpenStack Client Library Components
+#   Note default install is from pip, see LIBS_FROM_GIT
 #
 ##############
 
@@ -276,6 +282,8 @@
 # ironic client
 GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
 GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master}
+# The ironic plugin is out of tree, but nova uses it; set GITDIR here.
+GITDIR["python-ironicclient"]=$DEST/python-ironicclient
 
 # the base authentication plugins that clients use to authenticate
 GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git}
@@ -307,6 +315,7 @@
 ###################
 #
 #  Oslo Libraries
+#   Note default install is from pip, see LIBS_FROM_GIT
 #
 ###################
 
@@ -366,6 +375,10 @@
 GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git}
 GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master}
 
+# oslo.privsep
+GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git}
+GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master}
+
 # oslo.reports
 GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git}
 GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master}
@@ -442,7 +455,7 @@
 GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master}
 
 # s3 support for swift
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git}
+SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git}
 SWIFT3_BRANCH=${SWIFT3_BRANCH:-master}
 
 # ceilometer middleware
@@ -457,6 +470,8 @@
 # ironic common lib
 GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git}
 GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master}
+# this doesn't exist in a lib file, so set it here
+GITDIR["ironic-lib"]=$DEST/ironic-lib
 
 
 ##################
@@ -637,9 +652,6 @@
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"}
 
-# Compatibility until it's eradicated from CI
-USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN}
-
 # Set default screen name
 SCREEN_NAME=${SCREEN_NAME:-stack}
 
@@ -660,6 +672,9 @@
 # Service startup timeout
 SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60}
 
+# Service graceful shutdown timeout
+SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
+
 # Support alternative yum -- in future Fedora 'dnf' will become the
 # only supported installer, but for now 'yum' and 'dnf' are both
 # available in parallel with compatible CLIs.  Allow manual switching
@@ -755,6 +770,16 @@
 # Use native SSL for servers in ``SSL_ENABLED_SERVICES``
 USE_SSL=$(trueorfalse False USE_SSL)
 
+# ebtables is inherently racy. If you run it from two or more processes
+# simultaneously it will collide, badly, in the kernel and produce
+# failures or corruption of ebtables. The only way around it is for
+# all tools running ebtables to only ever do so with the --concurrent
+# flag. This requires libvirt >= 1.2.11.
+#
+# If you don't have that version, the following workaround will replace
+# ebtables with a wrapper script so that it is safe to run without
+# that flag.
+EBTABLES_RACE_FIX=$(trueorfalse False EBTABLES_RACE_FIX)
 
 # Following entries need to be last items in file
 
diff --git a/tests/run-process.sh b/tests/run-process.sh
index bdf1395..301b9a0 100755
--- a/tests/run-process.sh
+++ b/tests/run-process.sh
@@ -5,7 +5,7 @@
 #
 # Set USE_SCREEN True|False to change use of screen.
 #
-# This script emulates the basic exec envirnment in ``stack.sh`` to test
+# This script emulates the basic exec environment in ``stack.sh`` to test
 # the process spawn and kill operations.
 
 if [[ -z $1 ]]; then
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index f555de8..be8dc5e 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -245,4 +245,33 @@
     passed "OK"
 fi
 
+function test_export_proxy_variables {
+    echo "Testing export_proxy_variables()"
+
+    local expected results
+
+    http_proxy=http_proxy_test
+    https_proxy=https_proxy_test
+    no_proxy=no_proxy_test
+
+    export_proxy_variables
+    expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy")
+    results=$(env | egrep '(http(s)?|no)_proxy=')
+    if [[ $expected = $results ]]; then
+        passed "OK: Proxy variables are exported when proxy variables are set"
+    else
+        failed "Expected: $expected, Failed: $results"
+    fi
+
+    unset http_proxy https_proxy no_proxy
+    export_proxy_variables
+    results=$(env | egrep '(http(s)?|no)_proxy=')
+    if [[ "" = $results ]]; then
+        passed "OK: Proxy variables aren't exported when proxy variables aren't set"
+    else
+        failed "Expected: '', Failed: $results"
+    fi
+}
+test_export_proxy_variables
+
 report_results
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index d9cb8d8..a5e1107 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -80,6 +80,11 @@
     sudo chown -R root:root ${INI_TMP_ETC_DIR}
 fi
 
+# test iniget_sections
+VAL=$(iniget_sections "${TEST_INI}")
+assert_equal "$VAL" "default aaa bbb ccc ddd eee del_separate_options \
+del_same_option del_missing_option del_missing_option_multi del_no_options"
+
 # Test with missing arguments
 BEFORE=$(cat ${TEST_INI})
 
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index 8e8c022..f31560a 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -41,7 +41,7 @@
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports"
-ALL_LIBS+=" keystoneauth ironic-lib"
+ALL_LIBS+=" keystoneauth ironic-lib oslo.privsep"
 
 # Generate the above list with
 # echo ${!GITREPO[@]}
diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh
index a04c081..327fb56 100755
--- a/tests/test_meta_config.sh
+++ b/tests/test_meta_config.sh
@@ -23,6 +23,12 @@
     fi
 }
 
+# Mock functions-common:die so that it does not
+# interrupt our test script; note that "exit -1"
+# is reported to the caller as status 255.
+function die {
+    exit -1
+}
+
 TEST_1C_ADD="[eee]
 type=new
 multi = foo2"
@@ -110,6 +116,15 @@
 [DEFAULT]
 servers=10.11.12.13:80
 
+[[test8|/permission-denied.conf]]
+foo=bar
+
+[[test9|\$UNDEF]]
+foo=bar
+
+[[test10|does-not-exist-dir/test.conf]]
+foo=bar
+
 [[test-multi-sections|test-multi-sections.conf]]
 [sec-1]
 cfg_item1 = abcd
@@ -340,6 +355,36 @@
 servers = 10.11.12.13:80"
 check_result "$VAL" "$EXPECT_VAL"
 
+echo "merge_config_file test8 non-touchable conf file: "
+set +e
+# function is expected to fail and exit, running it
+# in a subprocess to let this script proceed
+(merge_config_file test.conf test8 /permission-denied.conf)
+VAL=$?
+EXPECT_VAL=255
+check_result "$VAL" "$EXPECT_VAL"
+set -e
+
+echo -n "merge_config_group test9 undefined conf file: "
+set +e
+# function is expected to fail and exit, running it
+# in a subprocess to let this script proceed
+(merge_config_group test.conf test9)
+VAL=$?
+EXPECT_VAL=255
+check_result "$VAL" "$EXPECT_VAL"
+set -e
+
+echo -n "merge_config_group test10 not directory: "
+set +e
+# function is expected to fail and exit, running it
+# in a subprocess to let this script proceed
+(merge_config_group test.conf test10)
+VAL=$?
+EXPECT_VAL=255
+check_result "$VAL" "$EXPECT_VAL"
+set -e
+
 rm -f test.conf test1c.conf test2a.conf \
     test-space.conf test-equals.conf test-strip.conf \
     test-colon.conf test-env.conf test-multiline.conf \
diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh
new file mode 100755
index 0000000..a568abf
--- /dev/null
+++ b/tests/test_package_ordering.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# basic test to ensure that package-install files remain sorted
+# alphabetically.
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+source $TOP/tests/unittest.sh
+
+PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f)
+
+TMPDIR=$(mktemp -d)
+
+SORTED=${TMPDIR}/sorted
+UNSORTED=${TMPDIR}/unsorted
+
+for p in $PKG_FILES; do
+    grep -v '^#' $p > ${UNSORTED}
+    sort ${UNSORTED} > ${SORTED}
+
+    if [ -n "$(diff -c ${UNSORTED} ${SORTED})" ]; then
+        failed "$p is unsorted"
+        # output this, it's helpful to see what exactly is unsorted
+        diff -c ${UNSORTED} ${SORTED}
+    else
+        passed "$p is sorted"
+    fi
+done
+
+rm -rf ${TMPDIR}
+
+report_results
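
If this test flags a file, the sortedness check is easy to reproduce by hand before fixing the file; for instance (the target path is just an example, and note the test strips comment lines first):

    # Reproduce the check for one package list
    grep -v '^#' files/debs/general | sort -C || echo "unsorted"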
diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh
index 2689589..03996ce 100755
--- a/tests/test_truefalse.sh
+++ b/tests/test_truefalse.sh
@@ -8,6 +8,14 @@
 source $TOP/functions
 source $TOP/tests/unittest.sh
 
+# A common mistake is to use $FOO instead of "FOO"; in that case we
+# should die
+bash -c "source $TOP/functions-common; VAR=\$(trueorfalse False \$FOO)" &> /dev/null
+assert_equal 1 $? "missing test-value"
+
+VAL=$(trueorfalse False MISSING_VARIABLE)
+assert_equal "False" $VAL "blank test-value"
+
 function test_trueorfalse {
     local one=1
     local captrue=True
diff --git a/tests/unittest.sh b/tests/unittest.sh
index 603652a..26b5b8e 100644
--- a/tests/unittest.sh
+++ b/tests/unittest.sh
@@ -20,8 +20,10 @@
 # pass a test, printing out MSG
 #  usage: passed message
 function passed {
-    local lineno=$(caller 0 | awk '{print $1}')
-    local function=$(caller 0 | awk '{print $2}')
+    local lineno
+    lineno=$(caller 0 | awk '{print $1}')
+    local function
+    function=$(caller 0 | awk '{print $2}')
     local msg="$1"
     if [ -z "$msg" ]; then
         msg="OK"
@@ -33,8 +35,10 @@
 # fail a test, printing out MSG
 #  usage: failed message
 function failed {
-    local lineno=$(caller 0 | awk '{print $1}')
-    local function=$(caller 0 | awk '{print $2}')
+    local lineno
+    lineno=$(caller 0 | awk '{print $1}')
+    local function
+    function=$(caller 0 | awk '{print $2}')
     local msg="$1"
     FAILED_FUNCS+="$function:L$lineno\n"
     echo "ERROR: $function:L$lineno!"
@@ -42,11 +46,13 @@
     ERROR=$((ERROR+1))
 }
 
-# assert string comparision of val1 equal val2, printing out msg
+# assert string comparison of val1 equal val2, printing out msg
 #  usage: assert_equal val1 val2 msg
 function assert_equal {
-    local lineno=`caller 0 | awk '{print $1}'`
-    local function=`caller 0 | awk '{print $2}'`
+    local lineno
+    lineno=`caller 0 | awk '{print $1}'`
+    local function
+    function=`caller 0 | awk '{print $2}'`
     local msg=$3
 
     if [ -z "$msg" ]; then
@@ -66,8 +72,10 @@
 # assert variable is empty/blank, printing out msg
 #  usage: assert_empty VAR msg
 function assert_empty {
-    local lineno=`caller 0 | awk '{print $1}'`
-    local function=`caller 0 | awk '{print $2}'`
+    local lineno
+    lineno=`caller 0 | awk '{print $1}'`
+    local function
+    function=`caller 0 | awk '{print $2}'`
     local msg=$2
 
     if [ -z "$msg" ]; then
@@ -84,16 +92,17 @@
     fi
 }
 
-# print a summary of passing and failing tests, exiting
-# with an error if we have failed tests
+# Print a summary of passing and failing tests and exit
+# (with an error if we have failed tests)
 #  usage: report_results
 function report_results {
     echo "$PASS Tests PASSED"
-    if [[ $ERROR -gt 1 ]]; then
+    if [[ $ERROR -gt 0 ]]; then
         echo
         echo "The following $ERROR tests FAILED"
         echo -e "$FAILED_FUNCS"
         echo "---"
         exit 1
     fi
+    exit 0
 }
diff --git a/tools/build_docs.sh b/tools/build_docs.sh
index fa84343..7dc492e 100755
--- a/tools/build_docs.sh
+++ b/tools/build_docs.sh
@@ -81,7 +81,7 @@
     mkdir -p $FQ_HTML_BUILD/`dirname $f`;
     $SHOCCO $f > $FQ_HTML_BUILD/$f.html
 done
-for f in $(find functions functions-common inc lib pkg samples -type f -name \*); do
+for f in $(find functions functions-common inc lib pkg samples -type f -name \* ! -name \*.md ! -name \*.conf); do
     echo $f
     FILES+="$f "
     mkdir -p $FQ_HTML_BUILD/`dirname $f`;
diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh
deleted file mode 100755
index 14c2999..0000000
--- a/tools/build_wheels.sh
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/usr/bin/env bash
-#
-# **tools/build_wheels.sh** - Build a cache of Python wheels
-#
-# build_wheels.sh [package [...]]
-#
-# System package prerequisites listed in ``files/*/devlibs`` will be installed
-#
-# Builds wheels for all virtual env requirements listed in
-# ``venv-requirements.txt`` plus any supplied on the command line.
-#
-# Assumes:
-# - ``tools/install_pip.sh`` has been run and a suitable ``pip/setuptools`` is available.
-
-# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone
-# or in a sub-shell
-if [[ -z "$TOP_DIR" ]]; then
-
-    set -o errexit
-    set -o nounset
-
-    # Keep track of the DevStack directory
-    TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-    FILES=$TOP_DIR/files
-
-    # Import common functions
-    source $TOP_DIR/functions
-
-    GetDistro
-
-    source $TOP_DIR/stackrc
-
-    trap err_trap ERR
-
-fi
-
-# Get additional packages to build
-MORE_PACKAGES="$@"
-
-# Exit on any errors so that errors don't compound
-function err_trap {
-    local r=$?
-    set +o xtrace
-
-    rm -rf $TMP_VENV_PATH
-
-    exit $r
-}
-
-# Get system prereqs
-install_package $(get_packages devlibs)
-
-# Get a modern ``virtualenv``
-pip_install virtualenv
-
-# Prepare the workspace
-TMP_VENV_PATH=$(mktemp -d tmp-venv-XXXX)
-virtualenv $TMP_VENV_PATH
-
-# Install modern pip and wheel
-PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel
-
-# BUG: cffi has a lot of issues. It has no stable ABI, if installed
-# code is built with a different ABI than the one that's detected at
-# load time, it tries to compile on the fly for the new ABI in the
-# install location (which will probably be /usr and not
-# writable). Also cffi is often included via setup_requires by
-# packages, which have different install rules (allowing betas) than
-# pip has.
-#
-# Because of this we must pip install cffi into the venv to build
-# wheels.
-PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install_gr cffi
-
-# ``VENV_PACKAGES`` is a list of packages we want to pre-install
-VENV_PACKAGE_FILE=$FILES/venv-requirements.txt
-if [[ -r $VENV_PACKAGE_FILE ]]; then
-    VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE)
-fi
-
-for pkg in ${VENV_PACKAGES,/ } ${MORE_PACKAGES}; do
-    $TMP_VENV_PATH/bin/pip wheel $pkg
-done
-
-# Clean up wheel workspace
-rm -rf $TMP_VENV_PATH
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index de44abb..25f713c 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -190,7 +190,8 @@
     local user_passwd=$5
 
     # The admin user can see all users' secret AWS keys, which does not look good
-    local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
+    local line
+    line=$(openstack ec2 credentials list --user $user_id | grep " $project_id " || true)
     if [ -z "$line" ]; then
         openstack ec2 credentials create --user $user_id --project $project_id 1>&2
         line=`openstack ec2 credentials list --user $user_id | grep " $project_id "`
diff --git a/tools/dstat.sh b/tools/dstat.sh
new file mode 100755
index 0000000..3c0b3be
--- /dev/null
+++ b/tools/dstat.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# **tools/dstat.sh** - Execute instances of DStat to log system load info
+#
+# Multiple instances of DStat are executed because certain features are
+# mutually incompatible in a single invocation, particularly CSV output
+# versus the "top-cpu-adv" and "top-io-adv" flags.
+#
+# Assumes:
+#  - dstat command is installed
+
+# Retrieve the log directory passed in by the calling script.
+LOGDIR=$1
+
+# Command-line arguments for the primary DStat process.
+DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --swap"
+
+# Command-line arguments for secondary background DStat process.
+DSTAT_CSV_OPTS="-tcmndrylpg --output $LOGDIR/dstat-csv.log"
+
+# Execute and background the secondary dstat process and discard its output.
+dstat $DSTAT_CSV_OPTS >& /dev/null &
+
+# Execute and background the primary dstat process, but keep its output in this
+# TTY.
+dstat $DSTAT_OPTS &
+
+# Catch any exit signals, making sure to also terminate any child processes.
+trap "kill -- -$$" EXIT
+
+# Keep this script running as long as child dstat processes are alive.
+wait
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index a601cf2..193a1f7 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -108,7 +108,7 @@
         sudo setenforce 0
     fi
 
-    FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD)
+    FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD)
     if [[ $FORCE_FIREWALLD == "False" ]]; then
         # On Fedora 20 firewalld interacts badly with libvirt and
         # slows things down significantly (this issue was fixed in
@@ -135,7 +135,7 @@
         fi
     fi
 
-    if  [[ "$os_RELEASE" -ge "21" ]]; then
+    if  [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "22" ]]; then
         # requests ships vendored version of chardet/urllib3, but on
         # fedora these are symlinked back to the primary versions to
         # avoid duplication of code on disk.  This is fine when
@@ -152,9 +152,9 @@
         # https://bugs.launchpad.net/glance/+bug/1476770
         # https://bugzilla.redhat.com/show_bug.cgi?id=1253823
 
-        base_path=/usr/lib/python2.7/site-packages/requests/packages
+        base_path=$(get_package_path requests)/packages
         if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then
-            sudo rm -f /usr/lib/python2.7/site-packages/requests/packages/{chardet,urllib3}
+            sudo rm -f $base_path/{chardet,urllib3}
             # install requests with the bundled urllib3 to avoid conflicts
             pip_install --upgrade --force-reinstall requests
         fi
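
get_package_path itself is not shown in this diff; a plausible sketch of such a helper, which asks the active interpreter where a package lives instead of hard-coding /usr/lib/python2.7, might look like this (an assumption, not the verbatim DevStack implementation):

    # Assumed shape of the helper
    function get_package_path {
        local package=$1
        python -c "import $package; print($package.__path__[0])"
    }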
diff --git a/tools/image_list.sh b/tools/image_list.sh
index a27635e..27b3d46 100755
--- a/tools/image_list.sh
+++ b/tools/image_list.sh
@@ -3,6 +3,12 @@
 # Keep track of the DevStack directory
 TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
 
+# The following "source" implicitly calls get_default_host_ip() in
+# stackrc and will die if the selected default IP happens to lie
+# in the default ranges for FIXED_RANGE or FLOATING_RANGE. Since we
+# do not really need HOST_IP to be properly set in the remainder of
+# this script, just set it to some dummy value and make stackrc happy.
+HOST_IP=SKIP
 source $TOP_DIR/functions
 
 # Possible virt drivers, if we have more, add them here. Always keep
diff --git a/tools/install_ebtables_workaround.sh b/tools/install_ebtables_workaround.sh
new file mode 100755
index 0000000..45ced87
--- /dev/null
+++ b/tools/install_ebtables_workaround.sh
@@ -0,0 +1,31 @@
+#!/bin/bash -eu
+#
+# Copyright 2015 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+#
+# This replaces the ebtables binary on your system with a wrapper script
+# that does implicit locking.  This is needed if your platform ships
+# libvirt < 1.2.11.
+
+EBTABLES=/sbin/ebtables
+EBTABLESREAL=/sbin/ebtables.real
+FILES=$TOP_DIR/files
+
+if [[ -f "$EBTABLES" ]]; then
+    if file $EBTABLES | grep ELF; then
+        sudo mv $EBTABLES $EBTABLESREAL
+        sudo install -m 0755 $FILES/ebtables.workaround $EBTABLES
+        echo "Replaced ebtables with locking workaround"
+    fi
+fi
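
The wrapper installed from files/ebtables.workaround is not part of this hunk; a minimal flock-based sketch of what such a wrapper does (illustrative, the shipped file may differ):

    #!/bin/bash
    # Serialize every ebtables invocation through a single lock file,
    # then delegate to the real binary that was moved aside above.
    flock /var/run/ebtables.lock /sbin/ebtables.real "$@"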
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 7b42c8c..f239c7b 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -8,6 +8,7 @@
 
 # Assumptions:
 # - update pip to $INSTALL_PIP_VERSION
+# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed
 
 set -o errexit
 set -o xtrace
@@ -31,6 +32,8 @@
 echo "Distro: $DISTRO"
 
 function get_versions {
+    # FIXME(dhellmann): Deal with multiple python versions here? This
+    # is just used for reporting, so maybe not?
     PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true)
     if [[ -n $PIP ]]; then
         PIP_VERSION=$($PIP --version | awk '{ print $2}')
@@ -42,6 +45,15 @@
 
 
 function install_get_pip {
+    # If get-pip.py isn't python, delete it. This was probably caused
+    # by an outage on the server.
+    if [[ -r $LOCAL_PIP ]]; then
+        if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then
+            echo "WARNING: Corrupt $LOCAL_PIP found removing"
+            rm $LOCAL_PIP
+        fi
+    fi
+
     # The OpenStack gate and others put a cached version of get-pip.py
     # for this to find, explicitly to avoid download issues.
     #
@@ -53,12 +65,22 @@
     # since and only download if a new version is out -- but only if
     # it seems we downloaded the file originally.
     if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then
-        curl --retry 6 --retry-delay 5 \
-            -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \
+        # only test freshness if LOCAL_PIP is actually there,
+        # otherwise we generate a scary warning.
+        local timecond=""
+        if [[ -r $LOCAL_PIP ]]; then
+            timecond="-z $LOCAL_PIP"
+        fi
+
+        curl -f --retry 6 --retry-delay 5 \
+            $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \
             die $LINENO "Download of get-pip.py failed"
         touch $LOCAL_PIP.downloaded
     fi
     sudo -H -E python $LOCAL_PIP
+    if python3_enabled; then
+        sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP
+    fi
 }
 
 
@@ -94,7 +116,12 @@
 # Do pip
 
 # Eradicate any and all system packages
-uninstall_package python-pip
+
+# python in f23 depends on the python-pip package
+if ! { is_fedora && [[ $DISTRO == "f23" ]]; }; then
+    uninstall_package python-pip
+    uninstall_package python3-pip
+fi
 
 install_get_pip
 
@@ -102,6 +129,7 @@
     configure_pypi_alternative_url
 fi
 
+set -x
 pip_install -U setuptools
 
 get_versions
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index a07e58d..031f8a8 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -61,7 +61,7 @@
 # ================
 
 # Install package requirements
-PACKAGES=$(get_packages general $ENABLED_SERVICES)
+PACKAGES=$(get_packages general,$ENABLED_SERVICES)
 PACKAGES="$PACKAGES $(get_plugin_packages)"
 
 if is_ubuntu && echo $PACKAGES | grep -q dkms ; then
@@ -81,6 +81,9 @@
     fi
 fi
 
+if python3_enabled; then
+    install_python3
+fi
 
 # Mark end of run
 # ---------------
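
python3_enabled and install_python3 come from DevStack's function library rather than this file; hedged sketches consistent with the USE_PYTHON3 and PYTHON3_VERSION variables added to stackrc above (illustrative only):

    # Illustrative sketches, not the verbatim implementations
    function python3_enabled {
        [[ $(trueorfalse False USE_PYTHON3) == "True" ]]
    }
    function install_python3 {
        if is_ubuntu; then
            apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
        fi
    }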
diff --git a/tools/ironic/scripts/cleanup-node b/tools/ironic/scripts/cleanup-node
deleted file mode 100755
index c4e4e70..0000000
--- a/tools/ironic/scripts/cleanup-node
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-# **cleanup-nodes**
-
-# Cleans up baremetal poseur nodes and volumes created during ironic setup
-# Assumes calling user has proper libvirt group membership and access.
-
-set -exu
-
-LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
-LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
-
-NAME=$1
-NETWORK_BRIDGE=$2
-
-export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
-
-VOL_NAME="$NAME.qcow2"
-virsh list | grep -q $NAME && virsh destroy $NAME
-virsh list --inactive | grep -q $NAME && virsh undefine $NAME
-
-if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then
-  virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME &&
-      virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL
-fi
diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm
deleted file mode 100755
index 378fcb8..0000000
--- a/tools/ironic/scripts/configure-vm
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import os.path
-
-import libvirt
-
-templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)),
-                           'templates')
-
-
-CONSOLE_LOG = """
-    <serial type='file'>
-      <source path='%(console_log)s'/>
-      <target port='0'/>
-      <alias name='serial0'/>
-    </serial>
-    <serial type='pty'>
-      <source path='/dev/pts/49'/>
-      <target port='1'/>
-      <alias name='serial1'/>
-    </serial>
-    <console type='file'>
-      <source path='%(console_log)s'/>
-      <target type='serial' port='0'/>
-      <alias name='serial0'/>
-    </console>
-"""
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        description="Configure a kvm virtual machine for the seed image.")
-    parser.add_argument('--name', default='seed',
-                        help='the name to give the machine in libvirt.')
-    parser.add_argument('--image',
-                        help='Use a custom image file (must be qcow2).')
-    parser.add_argument('--engine', default='qemu',
-                        help='The virtualization engine to use')
-    parser.add_argument('--arch', default='i686',
-                        help='The architecture to use')
-    parser.add_argument('--memory', default='2097152',
-                        help="Maximum memory for the VM in KB.")
-    parser.add_argument('--cpus', default='1',
-                        help="CPU count for the VM.")
-    parser.add_argument('--bootdev', default='hd',
-                        help="What boot device to use (hd/network).")
-    parser.add_argument('--network', default="brbm",
-                        help='The libvirt network name to use')
-    parser.add_argument('--libvirt-nic-driver', default='e1000',
-                        help='The libvirt network driver to use')
-    parser.add_argument('--console-log',
-                        help='File to log console')
-    parser.add_argument('--emulator', default=None,
-                        help='Path to emulator bin for vm template')
-    args = parser.parse_args()
-    with file(templatedir + '/vm.xml', 'rb') as f:
-        source_template = f.read()
-    params = {
-        'name': args.name,
-        'imagefile': args.image,
-        'engine': args.engine,
-        'arch': args.arch,
-        'memory': args.memory,
-        'cpus': args.cpus,
-        'bootdev': args.bootdev,
-        'network': args.network,
-        'nicdriver': args.libvirt_nic_driver,
-        'emulator': args.emulator,
-    }
-
-    if args.emulator:
-        params['emulator'] = args.emulator
-    else:
-        if os.path.exists("/usr/bin/kvm"):  # Debian
-            params['emulator'] = "/usr/bin/kvm"
-        elif os.path.exists("/usr/bin/qemu-kvm"):  # Redhat
-            params['emulator'] = "/usr/bin/qemu-kvm"
-
-    if args.console_log:
-        params['bios_serial'] = "<bios useserial='yes'/>"
-        params['console_log'] = CONSOLE_LOG % {'console_log': args.console_log}
-    else:
-        params['bios_serial'] = ''
-        params['console_log'] = ''
-    libvirt_template = source_template % params
-    conn = libvirt.open("qemu:///system")
-
-    a = conn.defineXML(libvirt_template)
-    print ("Created machine %s with UUID %s" % (args.name, a.UUIDString()))
-
-if __name__ == '__main__':
-    main()
diff --git a/tools/ironic/scripts/create-node b/tools/ironic/scripts/create-node
deleted file mode 100755
index b018acd..0000000
--- a/tools/ironic/scripts/create-node
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-
-# **create-nodes**
-
-# Creates baremetal poseur nodes for ironic testing purposes
-
-set -ex
-
-# Keep track of the DevStack directory
-TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-
-NAME=$1
-CPU=$2
-MEM=$(( 1024 * $3 ))
-# Extra G to allow fuzz for partition table : flavor size and registered size
-# need to be different to actual size.
-DISK=$(( $4 + 1))
-
-case $5 in
-    i386) ARCH='i686' ;;
-    amd64) ARCH='x86_64' ;;
-    *) echo "Unsupported arch $4!" ; exit 1 ;;
-esac
-
-BRIDGE=$6
-EMULATOR=$7
-LOGDIR=$8
-
-LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"}
-LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"}
-LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
-
-export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI
-
-if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then
-    virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2
-    virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2
-    virsh pool-start $LIBVIRT_STORAGE_POOL >&2
-fi
-
-pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }')
-if [ "$pool_state" != "running" ] ; then
-  [ ! -d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images
-  virsh pool-start $LIBVIRT_STORAGE_POOL >&2
-fi
-
-if [ -n "$LOGDIR" ] ; then
-  mkdir -p "$LOGDIR"
-fi
-
-PREALLOC=
-if [ -f /etc/debian_version ]; then
-    PREALLOC="--prealloc-metadata"
-fi
-
-if [ -n "$LOGDIR" ] ; then
-  VM_LOGGING="--console-log $LOGDIR/${NAME}_console.log"
-else
-  VM_LOGGING=""
-fi
-VOL_NAME="${NAME}.qcow2"
-
-if ! virsh list --all | grep -q $NAME; then
-  virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME &&
-      virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2
-  virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2
-  volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME)
-  # Pre-touch the VM to set +C, as it can only be set on empty files.
-  sudo touch "$volume_path"
-  sudo chattr +C "$volume_path" || true
-  $TOP_DIR/scripts/configure-vm \
-    --bootdev network --name $NAME --image "$volume_path" \
-    --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER \
-    --emulator $EMULATOR --network $BRIDGE $VM_LOGGING >&2
-
-fi
-
-# echo mac
-virsh dumpxml $NAME | grep "mac address" | head -1 | cut -d\' -f2
diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network
deleted file mode 100755
index 83308ed..0000000
--- a/tools/ironic/scripts/setup-network
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-# **setup-network**
-
-# Setups openvswitch libvirt network suitable for
-# running baremetal poseur nodes for ironic testing purposes
-
-set -exu
-
-LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"}
-
-# Keep track of the DevStack directory
-TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-BRIDGE_SUFFIX=${1:-''}
-BRIDGE_NAME=brbm$BRIDGE_SUFFIX
-
-export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI"
-
-# Only add bridge if missing
-(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME}
-
-# Remove bridge before replacing it.
-(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME}
-(virsh net-list --inactive  | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME}
-
-virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml)
-virsh net-autostart ${BRIDGE_NAME}
-virsh net-start ${BRIDGE_NAME}
diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml
deleted file mode 100644
index 0769d3f..0000000
--- a/tools/ironic/templates/brbm.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<network>
-  <name>brbm</name>
-  <forward mode='bridge'/>
-  <bridge name='brbm'/>
-  <virtualport type='openvswitch'/>
-</network>
diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template
deleted file mode 100644
index 5f3d03f..0000000
--- a/tools/ironic/templates/tftpd-xinetd.template
+++ /dev/null
@@ -1,14 +0,0 @@
-service tftp
-{
-  protocol        = udp
-  port            = 69
-  socket_type     = dgram
-  wait            = yes
-  user            = root
-  server          = /usr/sbin/in.tftpd
-  server_args     = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR%
-  disable         = no
-  # This is a workaround for Fedora, where TFTP will listen only on
-  # IPv6 endpoint, if IPv4 flag is not used.
-  flags           = IPv4
-}
diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml
deleted file mode 100644
index ae7d685..0000000
--- a/tools/ironic/templates/vm.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<domain type='%(engine)s'>
-  <name>%(name)s</name>
-  <memory unit='KiB'>%(memory)s</memory>
-  <vcpu>%(cpus)s</vcpu>
-  <os>
-    <type arch='%(arch)s' machine='pc-1.0'>hvm</type>
-    <boot dev='%(bootdev)s'/>
-    <bootmenu enable='no'/>
-    %(bios_serial)s
-  </os>
-  <features>
-    <acpi/>
-    <apic/>
-    <pae/>
-  </features>
-  <clock offset='utc'/>
-  <on_poweroff>destroy</on_poweroff>
-  <on_reboot>restart</on_reboot>
-  <on_crash>restart</on_crash>
-  <devices>
-    <emulator>%(emulator)s</emulator>
-    <disk type='file' device='disk'>
-      <driver name='qemu' type='qcow2' cache='writeback'/>
-      <source file='%(imagefile)s'/>
-      <target dev='vda' bus='virtio'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
-    </disk>
-    <controller type='ide' index='0'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x1'/>
-    </controller>
-    <interface type='network'>
-      <source network='%(network)s'/>
-      <virtualport type='openvswitch'/>
-      <model type='%(nicdriver)s'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
-    </interface>
-    <input type='mouse' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes'/>
-    <video>
-      <model type='cirrus' vram='9216' heads='1'/>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0'/>
-    </video>
-    %(console_log)s
-    <memballoon model='virtio'>
-      <address type='pci' domain='0x0000' bus='0x00' slot='0x07' function='0x0'/>
-    </memballoon>
-  </devices>
-</domain>
-
diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh
index 0d5728a..ecbd79a 100755
--- a/tools/peakmem_tracker.sh
+++ b/tools/peakmem_tracker.sh
@@ -41,10 +41,12 @@
 # snapshot of current usage; i.e. checking the latest entry in the
 # file will give the peak-memory usage
 function tracker {
-    local low_point=$(get_mem_available)
+    local low_point
+    low_point=$(get_mem_available)
     while [ 1 ]; do
 
-        local mem_available=$(get_mem_available)
+        local mem_available
+        mem_available=$(get_mem_available)
 
         if [[ $mem_available -lt $low_point ]]; then
             low_point=$mem_available
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index 3a364fe..eb7265f 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -36,6 +36,7 @@
         self._cloud_data = {
             'region_name': args.os_region_name,
             'identity_api_version': args.os_identity_api_version,
+            'volume_api_version': args.os_volume_api_version,
             'auth': {
                 'auth_url': args.os_auth_url,
                 'username': args.os_username,
@@ -82,6 +83,7 @@
     parser.add_argument('--os-cloud', required=True)
     parser.add_argument('--os-region-name', default='RegionOne')
     parser.add_argument('--os-identity-api-version', default='3')
+    parser.add_argument('--os-volume-api-version', default='2')
     parser.add_argument('--os-cacert')
     parser.add_argument('--os-auth-url', required=True)
     parser.add_argument('--os-username', required=True)
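
With the new flag, the caller passes the volume API version alongside the existing options; an illustrative invocation (values are placeholders, and --os-password is assumed to exist alongside the flags visible in this hunk):

    python tools/update_clouds_yaml.py \
        --os-cloud devstack \
        --os-volume-api-version 2 \
        --os-auth-url http://10.0.0.10:5000/v3 \
        --os-username admin \
        --os-password secret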
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 1b337a9..97e4d94 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -86,8 +86,10 @@
 
 
 def ebtables_dump():
+    tables = ['filter', 'nat', 'broute']
     _header("EB Tables Dump")
-    _dump_cmd("sudo ebtables -L")
+    for table in tables:
+        _dump_cmd("sudo ebtables -t %s -L" % table)
 
 
 def iptables_dump():
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 6212cc5..a1adf59 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -77,7 +77,7 @@
     # NOTE: these need to be specified, otherwise devstack will try
     # to prompt for these passwords, blocking the install process.
 
-    MYSQL_PASSWORD=my_super_secret
+    DATABASE_PASSWORD=my_super_secret
     SERVICE_TOKEN=my_super_secret
     ADMIN_PASSWORD=my_super_secret
     SERVICE_PASSWORD=my_super_secret
diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh
index 1ebbeaf..66f7ef4 100755
--- a/tools/xen/scripts/install-os-vpx.sh
+++ b/tools/xen/scripts/install-os-vpx.sh
@@ -100,7 +100,8 @@
 {
     local v="$1"
     echo "Installing VM interface on [$BRIDGE]"
-    local out_network_uuid=$(find_network "$BRIDGE")
+    local out_network_uuid
+    out_network_uuid=$(find_network "$BRIDGE")
     xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0"
 }
 
diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh
index 1ed2494..96dad7e 100755
--- a/tools/xen/scripts/uninstall-os-vpx.sh
+++ b/tools/xen/scripts/uninstall-os-vpx.sh
@@ -35,9 +35,12 @@
 destroy_vdi()
 {
     local vbd_uuid="$1"
-    local type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
-    local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
-    local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
+    local type
+    type=$(xe_min vbd-list uuid=$vbd_uuid params=type)
+    local dev
+    dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice)
+    local vdi_uuid
+    vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid)
 
     if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then
         xe vdi-destroy uuid=$vdi_uuid
@@ -47,7 +50,8 @@
 uninstall()
 {
     local vm_uuid="$1"
-    local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
+    local power_state
+    power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state)
 
     if [ "$power_state" != "halted" ]; then
         xe vm-shutdown vm=$vm_uuid force=true
diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh
index 924e773..324e6a1 100755
--- a/tools/xen/test_functions.sh
+++ b/tools/xen/test_functions.sh
@@ -165,7 +165,8 @@
 function test_get_local_sr {
     setup_xe_response "uuid123"
 
-    local RESULT=$(. mocks && get_local_sr)
+    local RESULT
+    RESULT=$(. mocks && get_local_sr)
 
     [ "$RESULT" == "uuid123" ]
 
@@ -173,7 +174,8 @@
 }
 
 function test_get_local_sr_path {
-    local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
+    local RESULT
+    RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path)
 
     [ "/var/run/sr-mount/uuid1" == "$RESULT" ]
 }
diff --git a/tox.ini b/tox.ini
index 788fea9..f9d04f2 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,23 +8,25 @@
 install_command = pip install {opts} {packages}
 
 [testenv:bashate]
-deps = bashate
+deps =
+   {env:BASHATE_INSTALL_PATH:bashate==0.3.2}
 whitelist_externals = bash
 commands = bash -c "find {toxinidir}             \
-         -not \( -type d -name .?\* -prune \)    \ # prune all 'dot' dirs
-         -not \( -type d -name doc -prune \)     \ # skip documentation
-         -not \( -type d -name shocco -prune \)  \ # skip shocco
-         -type f                                 \ # only files
-         -not -name \*~                          \ # skip editors, readme, etc
+         -not \( -type d -name .?\* -prune \)    \
+         -not \( -type d -name doc -prune \)     \
+         -not \( -type d -name shocco -prune \)  \
+         -type f                                 \
+         -not -name \*~                          \
          -not -name \*.md                        \
          \(                                      \
           -name \*.sh -or                        \
+          -name \*.orig -or                      \
           -name \*rc -or                         \
           -name functions\* -or                  \
-          -wholename \*/inc/\* -or               \ # /inc files and
-          -wholename \*/lib/\*                   \ # /lib files are shell, but
-         \)                                      \ #   have no extension
-         -print0 | xargs -0 bashate -v"
+          -wholename \*/inc/\* -or               \
+          -wholename \*/lib/\*                   \
+         \)                                      \
+         -print0 | xargs -0 bashate -v -iE006 -eE005,E042"
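
The inline comments on the old find command had to go because a `#` comment cannot share a line with a backslash continuation: the backslash escapes the following space rather than the newline, so the command is silently split. A two-line demonstration:

    # Broken: '\ ' escapes the space, '# ...' comments out the rest of
    # the line, and the newline is NOT continued -- 'world' then runs
    # as its own command.
    echo hello \ # comment
    world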
 
 [testenv:docs]
 deps =
diff --git a/unstack.sh b/unstack.sh
index 30447a7..8eded83 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -69,6 +69,7 @@
 source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/dlm
 
 # Extras Source
 # --------------
@@ -172,6 +173,10 @@
     stop_dstat
 fi
 
+if is_service_enabled zookeeper; then
+    stop_zookeeper
+fi
+
 # Clean up the remainder of the screen processes
 SCREEN=$(which screen)
 if [[ -n "$SCREEN" ]]; then