Merge "Remove all *.pyc files in $DEST when executing clean.sh"
diff --git a/.gitignore b/.gitignore
index a470ff5..d1781bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,8 @@
 files/*.vmdk
 files/*.rpm
 files/*.rpm.*
+files/*.deb
+files/*.deb.*
 files/*.qcow2
 files/*.img
 files/images
diff --git a/HACKING.rst b/HACKING.rst
index d763c75..b76cb6c 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -219,7 +219,7 @@
     set -o errexit
 
     # Print the commands being run so that we can see the command that triggers
-    # an error.  It is also useful for following allowing as the install occurs.
+    # an error.  It is also useful for following as the install occurs.
     set -o xtrace
 
 * Settings and configuration are stored in ``exerciserc``, which must be
diff --git a/README.md b/README.md
index 4ba4619..ff5598b 100644
--- a/README.md
+++ b/README.md
@@ -25,9 +25,9 @@
 The DevStack master branch generally points to trunk versions of OpenStack
 components.  For older, stable versions, look for branches named
 stable/[release] in the DevStack repo.  For example, you can do the
-following to create a juno OpenStack cloud:
+following to create a Newton OpenStack cloud:
 
-    git checkout stable/juno
+    git checkout stable/newton
     ./stack.sh
 
 You can also pick specific OpenStack project releases by setting the appropriate
diff --git a/clean.sh b/clean.sh
index e0ec9f5..d92807c 100755
--- a/clean.sh
+++ b/clean.sh
@@ -46,6 +46,7 @@
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
@@ -95,6 +96,7 @@
 cleanup_nova
 cleanup_neutron
 cleanup_swift
+cleanup_horizon
 
 if is_service_enabled ldap; then
     cleanup_ldap
diff --git a/data/devstack-plugins-registry.header b/data/devstack-plugins-registry.header
index 6119ab5..576dbbd 100644
--- a/data/devstack-plugins-registry.header
+++ b/data/devstack-plugins-registry.header
@@ -1,18 +1,16 @@
-..
+.. Note to patch submitters:
 
-  Note to patch submitters:
+   # ============================= #
+   # THIS FILE IS AUTOGENERATED !  #
+   # ============================= #
 
-  # ============================= #
-  # THIS FILE IS AUTOGENERATED !  #
-  # ============================= #
+   ** Plugins are found automatically and added to this list **
 
-  ** Plugins are found automatically and added to this list **
+   This file is created by a periodic proposal job.  You should not
+   edit this file.
 
-  This file is created by a periodic proposal job.  You should not
-  edit this file.
-
-  You should edit the files data/devstack-plugins-registry.footer
-  data/devstack-plugins-registry.header to modify this text.
+   You should edit the files data/devstack-plugins-registry.footer
+   data/devstack-plugins-registry.header to modify this text.
 
 ==========================
  DevStack Plugin Registry
diff --git a/doc/source/assets/images/screen_session_1.png b/doc/source/assets/images/screen_session_1.png
new file mode 100644
index 0000000..6ad6752
--- /dev/null
+++ b/doc/source/assets/images/screen_session_1.png
Binary files differ
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1161b34..53ae82f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -63,7 +63,7 @@
 ::
 
     [[local|localrc]]
-    FIXED_RANGE=10.254.1.0/24
+    IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24
     ADMIN_PASSWORD=speciale
     LOGFILE=$DEST/logs/stack.sh.log
 
@@ -161,8 +161,8 @@
 
 -  no logging
 -  pre-set the passwords to prevent interactive prompts
--  move network ranges away from the local network (``FIXED_RANGE`` and
-   ``FLOATING_RANGE``, commented out below)
+-  move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE``
+   and ``FLOATING_RANGE``, commented out below)
 -  set the host IP if detection is unreliable (``HOST_IP``, commented
    out below)
 
@@ -173,7 +173,7 @@
     DATABASE_PASSWORD=$ADMIN_PASSWORD
     RABBIT_PASSWORD=$ADMIN_PASSWORD
     SERVICE_PASSWORD=$ADMIN_PASSWORD
-    #FIXED_RANGE=172.31.1.0/24
+    #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24
     #FLOATING_RANGE=192.168.20.0/25
     #HOST_IP=10.3.4.5
 
@@ -521,16 +521,14 @@
 IP Version
 ----------
 
-``IP_VERSION`` can be used to configure DevStack to create either an
-IPv4, IPv6, or dual-stack self service project data-network by with
+``IP_VERSION`` can be used to configure Neutron to create either an
+IPv4, IPv6, or dual-stack self-service project data-network with
 either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
-respectively.  This functionality requires that the Neutron networking
-service is enabled by setting the following options:
+respectively.
 
     ::
 
-        disable_service n-net
-        enable_service q-svc q-agt q-dhcp q-l3
+        IP_VERSION=4+6
 
 The following optional variables can be used to alter the default IPv6
 behavior:
@@ -539,12 +537,12 @@
 
         IPV6_RA_MODE=slaac
         IPV6_ADDRESS_MODE=slaac
-        FIXED_RANGE_V6=fd$IPV6_GLOBAL_ID::/64
+        IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
         IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
 
-*Note*: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be
-configured with any valid IPv6 prefix. The default values make use of
-an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
+*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
+can be configured with any valid IPv6 prefix. The default values make
+use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
 
 Service Version
 ~~~~~~~~~~~~~~~
@@ -705,13 +703,13 @@
 ~~~~~~
 
 The logical volume group used to hold the Cinder-managed volumes is
-set by ``VOLUME_GROUP``, the logical volume name prefix is set with
+set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with
 ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
 with ``VOLUME_BACKING_FILE_SIZE``.
 
     ::
 
-        VOLUME_GROUP="stack-volumes"
+        VOLUME_GROUP_NAME="stack-volumes"
         VOLUME_NAME_PREFIX="volume-"
         VOLUME_BACKING_FILE_SIZE=10250M
 
diff --git a/doc/source/development.rst b/doc/source/development.rst
new file mode 100644
index 0000000..776ac6c
--- /dev/null
+++ b/doc/source/development.rst
@@ -0,0 +1,140 @@
+==========================
+ Developing with Devstack
+==========================
+
+Now that you have your nifty DevStack up and running, what can you do
+with it?
+
+Inspecting Services
+===================
+
+By default most services in DevStack are running in a `screen
+<https://www.gnu.org/software/screen/manual/screen.html>`_
+session.
+
+.. code-block:: bash
+
+   os3:~> screen -list
+   There is a screen on:
+        28994.stack	(08/10/2016 09:01:33 PM)	(Detached)
+   1 Socket in /var/run/screen/S-sdague.
+
+You can attach to this screen session using ``screen -r`` which gives
+you a view of the services in action.
+
+.. image:: assets/images/screen_session_1.png
+   :width: 100%
+
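+For example, to attach to the session and detach again (a minimal
+sketch; the session name ``stack`` matches the ``screen -list`` output
+above):
+
+.. code-block:: bash
+
+   # attach to the DevStack screen session
+   screen -r stack
+   # inside the session, press ctrl-a then d to detach again,
+   # leaving all of the services running
+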
+Basic Screen Commands
+---------------------
+
+The following minimal commands will be useful when using screen:
+
+* ``ctrl-a n`` - go to next window. Next is assumed to be right of
+  current window.
+* ``ctrl-a p`` - go to previous window. Previous is assumed to be left
+  of current window.
+* ``ctrl-a [`` - enter copy/scrollback mode. This allows you to
+  navigate back through the logs with the up arrow.
+* ``ctrl-a d`` - detach from screen. Gets you back to a normal
+  terminal, while leaving everything running.
+
+For more about using screen, see the excellent `screen manual
+<https://www.gnu.org/software/screen/manual/screen.html>`_.
+
+Patching a Service
+==================
+
+If you want to make a quick change to a running service the easiest
+way to do this is:
+
+* attach to screen
+* navigate to the window in question
+* ``ctrl-c`` to kill the service
+* make appropriate changes to the code
+* ``up arrow`` in the screen window to display the command used to run
+  that service
+* ``enter`` to restart the service
+
+This works for services, except those running under Apache (currently
+just ``keystone`` by default).
+
+.. warning::
+
+   All changes you are making are in checked out git trees that
+   DevStack thinks it has full control over. Uncommitted work, or
+   work committed to the master branch, may be overwritten during
+   subsequent DevStack runs.
+
+Testing a Patch Series
+======================
+
+When testing a larger set of patches, or patches that will impact more
+than one service within a project, it is often less confusing to use
+custom git locations, and make all your changes in a dedicated git
+tree.
+
+In your ``local.conf`` you can add ``**_REPO``, ``**_BRANCH`` for most projects
+to use a custom git tree instead of the default upstream ones.
+
+For instance:
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   NOVA_REPO=/home/sdague/nova
+   NOVA_BRANCH=fold_disk_config
+
+This will use your custom git tree and branch when doing any devstack
+operations, such as ``stack.sh``.
+
+When testing complicated changes, committing to these trees and then
+running ``./unstack.sh && ./stack.sh`` is often a valuable way to
+iterate. This does take longer per iteration than direct patching, as
+the whole devstack needs to rebuild.
+
+You can use this same approach to test patches that are up for review
+in gerrit by using the ref name that gerrit assigns to each change.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   NOVA_BRANCH=refs/changes/10/353710/1
+
+
+Testing Changes to Apache Based Services
+========================================
+
+When testing changes to Apache based services, such as ``keystone``,
+you can either use the Testing a Patch Series approach above, or make
+changes in the code tree and issue an Apache restart.
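+
+A minimal sketch of that restart step (an assumption about your
+distro's service name: ``apache2`` on Ubuntu/Debian, ``httpd`` on
+Fedora/CentOS):
+
+.. code-block:: bash
+
+   # edit the keystone code under /opt/stack/keystone, then reload it
+   # by restarting Apache, which serves keystone via mod_wsgi
+   sudo systemctl restart apache2    # or: sudo systemctl restart httpd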
+
+
+Testing Changes to Libraries
+============================
+
+When testing changes to libraries consumed by OpenStack services (such
+as oslo or any of the python-fooclient libraries), things are a little
+more complicated. By default we only test with released versions of
+these libraries that are on pypi.
+
+You must first override this with the ``LIBS_FROM_GIT`` setting. This
+will make your DevStack use the git version of that library instead
+of the released version.
+
+After that point you can also specify ``**_REPO``, ``**_BRANCH`` to use
+your changes instead of just upstream master.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   LIBS_FROM_GIT=oslo.policy
+   OSLOPOLICY_REPO=/home/sdague/oslo.policy
+   OSLOPOLICY_BRANCH=better_exception
+
+Because libraries are used by many services, library changes really
+need to go through a full ``./unstack.sh && ./stack.sh`` to see your
+changes in action.
+
+To figure out the repo / branch names for every library that's
+supported, you'll need to read the devstack source.
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
new file mode 100644
index 0000000..c2c7b91
--- /dev/null
+++ b/doc/source/guides.rst
@@ -0,0 +1,68 @@
+Guides
+======
+
+.. warning::
+
+   The guides are point in time contributions, and may not always be
+   up to date with the latest work in devstack.
+
+Walk through various setups used by stackers.
+
+.. toctree::
+   :glob:
+   :maxdepth: 1
+
+   guides/single-vm
+   guides/single-machine
+   guides/lxc
+   guides/multinode-lab
+   guides/neutron
+   guides/devstack-with-nested-kvm
+   guides/nova
+   guides/devstack-with-lbaas-v2
+
+All-In-One Single VM
+--------------------
+
+Run :doc:`OpenStack in a VM <guides/single-vm>`. The VMs launched in your cloud will be slow as
+they are running in QEMU (emulation), but it is useful if you don't have
+spare hardware lying around. :doc:`[Read] <guides/single-vm>`
+
+All-In-One Single Machine
+-------------------------
+
+Run :doc:`OpenStack on dedicated hardware <guides/single-machine>`. This can include a
+server-class machine or a laptop at home.
+:doc:`[Read] <guides/single-machine>`
+
+All-In-One LXC Container
+-------------------------
+
+Run :doc:`OpenStack in an LXC container <guides/lxc>`. Beneficial for intermediate
+and advanced users. The VMs launched in this cloud will be fully accelerated but
+not all OpenStack features are supported. :doc:`[Read] <guides/lxc>`
+
+Multi-Node Lab
+--------------
+
+Set up a :doc:`multi-node cluster <guides/multinode-lab>` with dedicated VLANs for VMs & Management.
+:doc:`[Read] <guides/multinode-lab>`
+
+DevStack with Neutron Networking
+--------------------------------
+
+Building a DevStack cluster with :doc:`Neutron Networking <guides/neutron>`.
+This guide is meant for building lab environments with a dedicated
+control node and multiple compute nodes.
+
+DevStack with KVM-based Nested Virtualization
+---------------------------------------------
+
+Procedure to set up :doc:`DevStack with KVM-based Nested Virtualization
+<guides/devstack-with-nested-kvm>`. With this setup, Nova instances
+will be more performant than with plain QEMU emulation.
+
+Nova and devstack
+--------------------------------
+
+Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 0c439ad..21bea99 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -66,21 +66,21 @@
     ./stack.sh
     . ./openrc
 
-    neutron net-list  # should show public and private networks
+    openstack network list  # should show public and private networks
 
 Create two nova instances that we can use as test http servers:
 
   ::
 
     #create nova instances on private network
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
     nova list # should show the nova instances just created
 
     #add secgroup rules to allow ssh etc..
-    neutron security-group-rule-create default --protocol icmp
-    neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22
-    neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80
+    openstack security group rule create default --protocol icmp
+    openstack security group rule create default --protocol tcp --dst-port 22:22
+    openstack security group rule create default --protocol tcp --dst-port 80:80
 
 Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
 
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index c996f95..dfc9936 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -260,7 +260,7 @@
     openstack user create $NAME --password=$PASSWORD --project $PROJECT
     openstack role add Member --user $NAME --project $PROJECT
     # The Member role is created by stack.sh
-    # openstack role list
+    # openstack role assignment list
 
 Swift
 -----
@@ -294,10 +294,10 @@
 
 ``stack-volumes`` can be pre-created on any physical volume supported by
 Linux's LVM. The name of the volume group can be changed by setting
-``VOLUME_GROUP`` in ``localrc``. ``stack.sh`` deletes all logical
-volumes in ``VOLUME_GROUP`` that begin with ``VOLUME_NAME_PREFIX`` as
+``VOLUME_GROUP_NAME`` in ``localrc``. ``stack.sh`` deletes all logical
+volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as
 part of cleaning up from previous runs. It is recommended to not use the
-root volume group as ``VOLUME_GROUP``.
+root volume group as ``VOLUME_GROUP_NAME``.
 
 The details of creating the volume group depends on the server hardware
 involved but looks something like this:
@@ -400,6 +400,10 @@
 
         ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts
 
+3. Verify that login via ssh works without a password::
+
+        ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION
+
 In essence, this means that every compute node's root user's public RSA key
 must exist in every other compute node's stack user's authorized_keys file and
 every compute node's public ECDSA key needs to be in every other compute
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index c5b1634..092809a 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -76,16 +76,10 @@
         RABBIT_PASSWORD=secret
         SERVICE_PASSWORD=secret
 
-        # Do not use Nova-Network
-        disable_service n-net
-        # Enable Neutron
-        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
-
-
         ## Neutron options
         Q_USE_SECGROUP=True
         FLOATING_RANGE="172.18.161.0/24"
-        FIXED_RANGE="10.0.0.0/24"
+        IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22"
         Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
         PUBLIC_NETWORK_GATEWAY="172.18.161.1"
         PUBLIC_INTERFACE=eth0
@@ -389,24 +383,21 @@
 
         Q_USE_PROVIDER_NETWORKING=True
 
-        # Do not use Nova-Network
-        disable_service n-net
-
-        # Neutron
-        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt
+        disable_service q-l3
 
         ## Neutron Networking options used to create Neutron Subnets
 
-        FIXED_RANGE="203.0.113.0/24"
+        IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
         NETWORK_GATEWAY=203.0.113.1
         PROVIDER_SUBNET_NAME="provider_net"
         PROVIDER_NETWORK_TYPE="vlan"
         SEGMENTATION_ID=2010
+        USE_SUBNETPOOL=False
 
-In this configuration we are defining FIXED_RANGE to be a
+In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
 publicly routed IPv4 subnet. In this specific instance we are using
 the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
-which is used for documentation.  In your DevStack setup, FIXED_RANGE
+which is used for documentation.  In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE
 would be a public IP address range that you or your organization has
 allocated to you, so that you could access your instances from the
 public internet.
@@ -530,16 +521,10 @@
     RABBIT_PASSWORD=secret
     SERVICE_PASSWORD=secret
 
-    # Do not use Nova-Network
-    disable_service n-net
-    # Enable Neutron
-    ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
-
-
     ## Neutron options
     Q_USE_SECGROUP=True
     FLOATING_RANGE="172.18.161.0/24"
-    FIXED_RANGE="10.0.0.0/24"
+    IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24"
     Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
     PUBLIC_NETWORK_GATEWAY="172.18.161.1"
     PUBLIC_INTERFACE=eth0
@@ -582,20 +567,18 @@
     Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
     Q_USE_PROVIDER_NETWORKING=True
 
-    #Enable Neutron services
-    disable_service n-net
     enable_plugin neutron git://git.openstack.org/openstack/neutron
-    ENABLED_SERVICES+=,q-agt,q-svc
 
     ## MacVTap agent options
     Q_AGENT=macvtap
     PHYSICAL_NETWORK=default
 
-    FIXED_RANGE="203.0.113.0/24"
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
     NETWORK_GATEWAY=203.0.113.1
     PROVIDER_SUBNET_NAME="provider_net"
     PROVIDER_NETWORK_TYPE="vlan"
     SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
 
     [[post-config|/$Q_PLUGIN_CONF_FILE]]
     [macvtap]
@@ -614,7 +597,7 @@
 
 For OVS, a similar configuration like described in the
 :ref:`OVS Provider Network <ovs-provider-network-controller>` section can be
-used. Just add the the following line to this local.conf, which also loads
+used. Just add the following line to this local.conf, which also loads
 the MacVTap mechanism driver:
 
 ::
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 68ec174..435011b 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,163 +1,135 @@
-DevStack
-========
+.. Documentation Architecture for the devstack docs.
+
+   It is really easy for online docs to meander over time as people
+   attempt to add the small bit of additional information they think
+   people need, into an existing information architecture. In order to
+   prevent that we need to be a bit strict as to what's on this front
+   page.
+
+   This should *only* be the quick start narrative, which should end
+   with 2 sections: what you can do with devstack once it's set up,
+   and how to go beyond this setup. Both should be a set of quick
+   links to other documents to let people explore from there.
+
+==========
+ DevStack
+==========
 
 .. image:: assets/images/logo-blue.png
 
 DevStack is a series of extensible scripts used to quickly bring up a
-complete OpenStack environment.  It is used interactively as a
-development environment and as the basis for much of the OpenStack
-project's functional testing.
+complete OpenStack environment based on the latest versions of
+everything from git master.  It is used interactively as a development
+environment and as the basis for much of the OpenStack project's
+functional testing.
 
 The source is available at
 `<https://git.openstack.org/cgit/openstack-dev/devstack>`__.
 
-.. toctree::
-   :glob:
-   :maxdepth: 1
+.. warning::
 
-   overview
-   configuration
-   plugins
-   plugin-registry
-   faq
-   hacking
+   DevStack will make substantial changes to your system during
+   installation. Only run DevStack on servers or virtual machines that
+   are dedicated to this purpose.
 
 Quick Start
------------
+===========
 
-#. Select a Linux Distribution
-
-   Only Ubuntu 14.04 (Trusty), Fedora 22 (or Fedora 23) and CentOS/RHEL
-   7 are documented here. OpenStack also runs and is packaged on other
-   flavors of Linux such as OpenSUSE and Debian.
-
-#. Install Selected OS
-
-   In order to correctly install all the dependencies, we assume a
-   specific minimal version of the supported distributions to make it as
-   easy as possible. We recommend using a minimal install of Ubuntu or
-   Fedora server in a VM if this is your first time.
-
-#. Download DevStack
-
-   ::
-
-       git clone https://git.openstack.org/openstack-dev/devstack
-
-   The ``devstack`` repo contains a script that installs OpenStack and
-   templates for configuration files
-
-#. Configure
-
-   We recommend at least a :ref:`minimal-configuration` be set up.
-
-#. Add Stack User
-
-   Devstack should be run as a non-root user with sudo enabled
-   (standard logins to cloud images such as "ubuntu" or "cloud-user"
-   are usually fine).
-
-   You can quickly create a separate `stack` user to run DevStack with
-
-   ::
-
-       devstack/tools/create-stack-user.sh; su stack
-
-#. Start the install, this will take a few minutes.
-
-   ::
-
-       cd devstack; ./stack.sh
-
-Guides
-======
-
-Walk through various setups used by stackers
-
-.. toctree::
-   :glob:
-   :maxdepth: 1
-
-   guides/single-vm
-   guides/single-machine
-   guides/lxc
-   guides/multinode-lab
-   guides/neutron
-   guides/devstack-with-nested-kvm
-   guides/nova
-   guides/devstack-with-lbaas-v2
-
-All-In-One Single VM
---------------------
-
-Run :doc:`OpenStack in a VM <guides/single-vm>`. The VMs launched in your cloud will be slow as
-they are running in QEMU (emulation), but it is useful if you don't have
-spare hardware laying around. :doc:`[Read] <guides/single-vm>`
-
-All-In-One Single Machine
--------------------------
-
-Run :doc:`OpenStack on dedicated hardware <guides/single-machine>`  This can include a
-server-class machine or a laptop at home.
-:doc:`[Read] <guides/single-machine>`
-
-All-In-One LXC Container
--------------------------
-
-Run :doc:`OpenStack in a LXC container <guides/lxc>`. Beneficial for intermediate
-and advanced users. The VMs launched in this cloud will be fully accelerated but
-not all OpenStack features are supported. :doc:`[Read] <guides/lxc>`
-
-Multi-Node Lab
---------------
-
-Setup a :doc:`multi-node cluster <guides/multinode-lab>` with dedicated VLANs for VMs & Management.
-:doc:`[Read] <guides/multinode-lab>`
-
-DevStack with Neutron Networking
---------------------------------
-
-Building a DevStack cluster with :doc:`Neutron Networking <guides/neutron>`.
-This guide is meant for building lab environments with a dedicated
-control node and multiple compute nodes.
-
-DevStack with KVM-based Nested Virtualization
----------------------------------------------
-
-Procedure to setup :doc:`DevStack with KVM-based Nested Virtualization
-<guides/devstack-with-nested-kvm>`. With this setup, Nova instances
-will be more performant than with plain QEMU emulation.
-
-Nova and devstack
---------------------------------
-
-Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
-
-DevStack Documentation
-======================
-
-Overview
---------
-
-:doc:`An overview of DevStack goals and priorities <overview>`
-
-Configuration
+Install Linux
 -------------
 
-:doc:`Configuring and customizing the stack <configuration>`
+Start with a clean and minimal install of a Linux system. Devstack
+attempts to support Ubuntu 14.04/16.04, Fedora 23/24, CentOS/RHEL 7,
+as well as Debian and OpenSUSE.
 
-Plugins
+If you do not have a preference, Ubuntu 16.04 is the most tested, and
+will probably go the smoothest.
+
+Download DevStack
+-----------------
+
+::
+
+   git clone https://git.openstack.org/openstack-dev/devstack
+
+The ``devstack`` repo contains a script that installs OpenStack and
+templates for configuration files.
+
+Create a local.conf
+-------------------
+
+Create a ``local.conf`` file with 4 passwords preset.
+
+::
+
+   [[local|localrc]]
+   ADMIN_PASSWORD=secret
+   DATABASE_PASSWORD=$ADMIN_PASSWORD
+   RABBIT_PASSWORD=$ADMIN_PASSWORD
+   SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+This is the minimum required config to get started with DevStack.
+
+Add Stack User
+--------------
+
+Devstack should be run as a non-root user with sudo enabled
+(standard logins to cloud images such as "ubuntu" or "cloud-user"
+are usually fine).
+
+You can quickly create a separate `stack` user to run DevStack with
+
+::
+
+   devstack/tools/create-stack-user.sh; su stack
+
+Start the install
+-----------------
+
+::
+
+   cd devstack; ./stack.sh
+
+This will take 15 - 20 minutes, largely depending on the speed of
+your internet connection. Many git trees and packages will be
+installed during this process.
+
+Profit!
 -------
 
-:doc:`Extending DevStack with new features <plugins>`
+You now have a working DevStack! Congrats!
 
-FAQ
----
+Your devstack will have installed ``keystone``, ``glance``, ``nova``,
+``cinder``, ``neutron``, and ``horizon``. Floating IPs will be
+available, and guests will have access to the external world.
 
-:doc:`The DevStack FAQ <faq>`
+You can access horizon to experience the web interface to
+OpenStack, and manage VMs, networks, volumes, and images from
+there.
 
-Contributing
-------------
+You can ``source openrc`` in your shell, and then use the
+``openstack`` command line tool to manage your devstack.
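+
+For example (assuming the default ``demo`` user and project created by
+``stack.sh``):
+
+::
+
+   source openrc demo demo
+   openstack image list
+   openstack server list
+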
 
-:doc:`Pitching in to make DevStack a better place <hacking>`
+You can ``cd /opt/stack/tempest`` and run tempest tests that have
+been configured to work with your devstack.
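+
+One way to run a small subset (a sketch that assumes tempest's
+``smoke`` tox environment; adjust to the tests you care about):
+
+::
+
+   cd /opt/stack/tempest
+   tox -e smoke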
 
+You can :doc:`make code changes to OpenStack and validate them
+<development>`.
+
+Going further
+-------------
+
+Learn more about our :doc:`configuration system <configuration>` to
+customize devstack for your needs, including making adjustments to the
+default :doc:`networking <networking>`.
+
+Read :doc:`guides <guides>` for specific setups people have (note:
+guides are point in time contributions, and may not always be kept
+up to date with the latest devstack).
+
+Enable :doc:`devstack plugins <plugins>` to support additional
+services, features, and configuration not present in base devstack.
+
+Get :doc:`the big picture <overview>` of what we are trying to do
+with devstack, and help us by :doc:`contributing to the project
+<hacking>`.
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
new file mode 100644
index 0000000..bdbeaaa
--- /dev/null
+++ b/doc/source/networking.rst
@@ -0,0 +1,116 @@
+=====================
+ DevStack Networking
+=====================
+
+An important part of the DevStack experience is networking that works
+by default for created guests. This might not be optimal for your
+particular testing environment, so this document tries its best to
+explain what's going on.
+
+Defaults
+========
+
+If you don't specify any configuration you will get the following:
+
+* neutron (including l3 with openvswitch)
+* private project networks for each openstack project
+* a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1
+* the demo project configured with fixed ips on a subnet allocated from
+  the 10.0.0.0/22 range
+* a ``br-ex`` interface controlled by neutron for all its networking
+  (this is not connected to any physical interfaces).
+* DNS resolution for guests based on the resolv.conf for your host
+* an ip masq rule that allows created guests to route out
+
+This creates an environment which is isolated to the single
+host. Guests can get to the external network for package
+updates. Tempest tests will work in this environment.
+
+.. note::
+
+   By default all OpenStack environments have security group rules
+   which block all inbound packets to guests. If you want to be able
+   to ssh / ping your created guests you should run the following.
+
+   .. code-block:: bash
+
+      openstack security group rule create --proto icmp --dst-port 0 default
+      openstack security group rule create --proto tcp --dst-port 22 default
+
+Locally Accessible Guests
+=========================
+
+If you want to make your guests accessible from other machines on your
+network, we have to connect ``br-ex`` to a physical interface.
+
+Dedicated Guest Interface
+-------------------------
+
+If you have 2 or more interfaces on your devstack server, you can
+allocate an interface to neutron to fully manage. This **should not**
+be the same interface you use to ssh into the devstack server itself.
+
+This is done by setting the ``PUBLIC_INTERFACE`` attribute.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth1
+
+That will put all layer 2 traffic from your guests onto the main
+network. When running in this mode the ip masq rule is **not** added
+in your devstack; you are responsible for making routing work on your
+local network.
+
+Shared Guest Interface
+----------------------
+
+.. warning::
+
+   This is not a recommended configuration. Because of interactions
+   between ovs and bridging, if you reboot your box with active
+   networking you may lose network connectivity to your system.
+
+If you need your guests accessible on the network, but only have 1
+interface (using something like a NUC), you can share your one
+network. But in order for this to work you need to manually set a lot
+of addresses, and have them all exactly correct.
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   PUBLIC_INTERFACE=eth0
+   HOST_IP=10.42.0.52
+   FLOATING_RANGE=10.42.0.52/24
+   PUBLIC_NETWORK_GATEWAY=10.42.0.1
+   Q_FLOATING_ALLOCATION_POOL=start=10.42.0.250,end=10.42.0.254
+
+In order for this scenario to work, the floating ip network must match
+the default networking on your server. This breaks HOST_IP detection,
+as we exclude the floating range by default, so you have to specify
+that manually.
+
+The ``PUBLIC_NETWORK_GATEWAY`` is the gateway that the server would normally
+use to get off the network. ``Q_FLOATING_ALLOCATION_POOL`` controls
+the range of floating ips that will be handed out. As we are sharing
+your existing network, you'll want to give it a slice that your local
+dhcp server is not allocating. Otherwise you could easily have
+conflicting ip addresses, and cause havoc with your local network.
+
+
+Private Network Addressing
+==========================
+
+The private network addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE``
+and the ``IPV6_ADDRS_SAFE_TO_USE`` variables. This allows users to specify a
+single variable of safe internal IPs that will be referenced whether or
+not subnetpools are in use.
+
+For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to
+the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly.
+
+For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of
+``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller,
+``FIXED_RANGE_V6`` will just use that value directly.
+``SUBNETPOOL_PREFIX_V6`` will just default to the value of
+``IPV6_ADDRS_SAFE_TO_USE`` directly.
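+
+For example, to move both ranges in ``local.conf`` (the specific
+prefixes below are only illustrative):
+
+.. code-block:: bash
+
+   [[local|localrc]]
+   IPV4_ADDRS_SAFE_TO_USE=10.254.0.0/22
+   IPV6_ADDRS_SAFE_TO_USE=fd00:254::/56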
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 5b6622e..6ece997 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -1,18 +1,16 @@
-..
+.. Note to patch submitters:
 
-  Note to patch submitters:
+   # ============================= #
+   # THIS FILE IS AUTOGENERATED !  #
+   # ============================= #
 
-  # ============================= #
-  # THIS FILE IS AUTOGENERATED !  #
-  # ============================= #
+   ** Plugins are found automatically and added to this list **
 
-  ** Plugins are found automatically and added to this list **
+   This file is created by a periodic proposal job.  You should not
+   edit this file.
 
-  This file is created by a periodic proposal job.  You should not
-  edit this file.
-
-  You should edit the files data/devstack-plugins-registry.footer
-  data/devstack-plugins-registry.header to modify this text.
+   You should edit the files data/devstack-plugins-registry.footer
+   data/devstack-plugins-registry.header to modify this text.
 
 ==========================
  DevStack Plugin Registry
@@ -26,6 +24,7 @@
 ====================================== ===
 Plugin Name                            URL
 ====================================== ===
+almanach                               `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
 aodh                                   `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
 app-catalog-ui                         `git://git.openstack.org/openstack/app-catalog-ui <https://git.openstack.org/cgit/openstack/app-catalog-ui>`__
 astara                                 `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
@@ -60,18 +59,26 @@
 freezer-api                            `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
 freezer-web-ui                         `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
 gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
+glare                                  `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
 gnocchi                                `git://git.openstack.org/openstack/gnocchi <https://git.openstack.org/cgit/openstack/gnocchi>`__
 group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
-higgins                                `git://git.openstack.org/openstack/higgins <https://git.openstack.org/cgit/openstack/higgins>`__
+heat                                   `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
+horizon-mellanox                       `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
 ironic                                 `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
 ironic-inspector                       `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
 ironic-staging-drivers                 `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
+karbor                                 `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
+karbor-dashboard                       `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
+keystone                               `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
 kingbird                               `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
-kuryr                                  `git://git.openstack.org/openstack/kuryr <https://git.openstack.org/cgit/openstack/kuryr>`__
+kuryr-kubernetes                       `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
+kuryr-libnetwork                       `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
 magnum                                 `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
 magnum-ui                              `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
 manila                                 `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
+masakari                               `git://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
 mistral                                `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
+mixmatch                               `git://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
 monasca-analytics                      `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
 monasca-api                            `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
 monasca-ceilometer                     `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
@@ -79,6 +86,7 @@
 monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
 murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
 networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
+networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
 networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
 networking-bgpvpn                      `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
 networking-brocade                     `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
@@ -100,6 +108,7 @@
 networking-plumgrid                    `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
 networking-powervm                     `git://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
 networking-sfc                         `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
+networking-vpp                         `git://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
 networking-vsphere                     `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
 neutron                                `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
 neutron-dynamic-routing                `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
@@ -107,13 +116,15 @@
 neutron-lbaas                          `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
 neutron-lbaas-dashboard                `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
 neutron-vpnaas                         `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
+nimble                                 `git://git.openstack.org/openstack/nimble <https://git.openstack.org/cgit/openstack/nimble>`__
 nova-docker                            `git://git.openstack.org/openstack/nova-docker <https://git.openstack.org/cgit/openstack/nova-docker>`__
 nova-lxd                               `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
+nova-mksproxy                          `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
 nova-powervm                           `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
+oaktree                                `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
 octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
 osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
 panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
-python-freezerclient                   `git://git.openstack.org/openstack/python-freezerclient <https://git.openstack.org/cgit/openstack/python-freezerclient>`__
 rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
 sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
 sahara-dashboard                       `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
@@ -121,12 +132,11 @@
 searchlight                            `git://git.openstack.org/openstack/searchlight <https://git.openstack.org/cgit/openstack/searchlight>`__
 searchlight-ui                         `git://git.openstack.org/openstack/searchlight-ui <https://git.openstack.org/cgit/openstack/searchlight-ui>`__
 senlin                                 `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
-smaug                                  `git://git.openstack.org/openstack/smaug <https://git.openstack.org/cgit/openstack/smaug>`__
-smaug-dashboard                        `git://git.openstack.org/openstack/smaug-dashboard <https://git.openstack.org/cgit/openstack/smaug-dashboard>`__
 solum                                  `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
 tacker                                 `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
 tap-as-a-service                       `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
 tricircle                              `git://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
+trio2o                                 `git://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
 trove                                  `git://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
 trove-dashboard                        `git://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
 vitrage                                `git://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
@@ -136,6 +146,8 @@
 watcher-dashboard                      `git://git.openstack.org/openstack/watcher-dashboard <https://git.openstack.org/cgit/openstack/watcher-dashboard>`__
 zaqar                                  `git://git.openstack.org/openstack/zaqar <https://git.openstack.org/cgit/openstack/zaqar>`__
 zaqar-ui                               `git://git.openstack.org/openstack/zaqar-ui <https://git.openstack.org/cgit/openstack/zaqar-ui>`__
+zun                                    `git://git.openstack.org/openstack/zun <https://git.openstack.org/cgit/openstack/zun>`__
+zun-ui                                 `git://git.openstack.org/openstack/zun-ui <https://git.openstack.org/cgit/openstack/zun-ui>`__
 ====================================== ===
 
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 70469d6..31987bc 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -99,7 +99,7 @@
       should exist at this point.
    -  **extra** - Called near the end after layer 1 and 2 services have
       been started.
-   - **test-config** Called at the end of devstack used to configure tempest
+   - **test-config** - Called at the end of devstack used to configure tempest
       or any other test environments
 
 -  **unstack** - Called by ``unstack.sh`` before other services are shut
diff --git a/doc/source/site-map.rst b/doc/source/site-map.rst
new file mode 100644
index 0000000..801fc66
--- /dev/null
+++ b/doc/source/site-map.rst
@@ -0,0 +1,23 @@
+:orphan:
+
+.. the TOC on the front page actually makes the document a lot more
+   confusing. This lets us bury a toc which we can link in when
+   appropriate.
+
+==========
+ Site Map
+==========
+
+.. toctree::
+   :glob:
+   :maxdepth: 3
+
+   overview
+   configuration
+   networking
+   plugins
+   plugin-registry
+   faq
+   development
+   hacking
+   guides
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 808ef76..8cbca54 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -20,7 +20,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 84ac08f..7478bdf 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -16,7 +16,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 2c8fe81..b380968 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index 6ab4d08..fff04df 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 485208b..5abc713 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 8115006..e8c8f62 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -20,7 +20,7 @@
 set -o errtrace
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 # Environment
@@ -148,7 +148,7 @@
 function get_role_id {
     local ROLE_NAME=$1
     local ROLE_ID
-    ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+    ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'`
     die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
     echo "$ROLE_ID"
 }
@@ -156,7 +156,7 @@
 function get_network_id {
     local NETWORK_NAME="$1"
     local NETWORK_ID
-    NETWORK_ID=`neutron net-list -F id  -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+    NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME`
     echo $NETWORK_ID
 }
 
@@ -234,9 +234,9 @@
     PROJECT_ID=$(get_project_id $PROJECT)
     source $TOP_DIR/openrc $PROJECT $PROJECT
     local NET_ID
-    NET_ID=$(neutron net-create --project-id $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+    NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA"
-    neutron subnet-create --ip-version 4 --project-id $PROJECT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR
+    openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet"
     neutron_debug_admin probe-create --device-owner compute $NET_ID
     source $TOP_DIR/openrc demo demo
 }
@@ -325,10 +325,10 @@
     PROJECT_ID=$(get_project_id $PROJECT)
     #TODO(nati) comment out until l3-agent merged
     #for res in port subnet net router;do
-    for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
+    for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do
         delete_probe $net_id
-        neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete %
-        neutron net-delete $net_id
+        openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete %
+        openstack network delete $net_id
     done
     source $TOP_DIR/openrc demo demo
 }
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index 5f8b0a4..2f78e39 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/swift.sh b/exercises/swift.sh
index 4a41e0f..8aa376b 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 0de1226..e7c3560 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following as the install occurs.
 set -o xtrace
 
 
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
deleted file mode 100644
index cc90128..0000000
--- a/extras.d/60-ceph.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-# ceph.sh - DevStack extras script to install Ceph
-
-if is_service_enabled ceph; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/ceph
-    elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        echo_summary "Installing Ceph"
-        check_os_support_ceph
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            install_ceph
-            echo_summary "Configuring Ceph"
-            configure_ceph
-            # NOTE (leseb): Do everything here because we need to have Ceph started before the main
-            # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
-            echo_summary "Initializing Ceph"
-            init_ceph
-            start_ceph
-        else
-            install_ceph_remote
-        fi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        if is_service_enabled glance; then
-            echo_summary "Configuring Glance for Ceph"
-            configure_ceph_glance
-        fi
-        if is_service_enabled nova; then
-            echo_summary "Configuring Nova for Ceph"
-            configure_ceph_nova
-        fi
-        if is_service_enabled cinder; then
-            echo_summary "Configuring Cinder for Ceph"
-            configure_ceph_cinder
-        fi
-        if is_service_enabled n-cpu; then
-            # NOTE (leseb): the part below is a requirement to attach Ceph block devices
-            echo_summary "Configuring libvirt secret"
-            import_libvirt_secret_ceph
-        fi
-
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            if is_service_enabled glance; then
-                echo_summary "Configuring Glance for Ceph"
-                configure_ceph_embedded_glance
-            fi
-            if is_service_enabled nova; then
-                echo_summary "Configuring Nova for Ceph"
-                configure_ceph_embedded_nova
-            fi
-            if is_service_enabled cinder; then
-                echo_summary "Configuring Cinder for Ceph"
-                configure_ceph_embedded_cinder
-            fi
-        fi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            cleanup_ceph_embedded
-            stop_ceph
-        fi
-        cleanup_ceph_general
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            cleanup_ceph_embedded
-        fi
-        cleanup_ceph_general
-    fi
-fi
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 8a4b0f0..428544f 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -44,8 +44,8 @@
     WSGIPassAuthorization On
 </Location>
 
-Alias /identity_v2_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_v2_admin>
+Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
+<Location /identity_admin>
     SetHandler wsgi-script
     Options +ExecCGI
 
diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template
new file mode 100644
index 0000000..b89ef96
--- /dev/null
+++ b/files/apache-placement-api.template
@@ -0,0 +1,25 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup placement-api
+    WSGIScriptAlias / %PUBLICWSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/placement-api.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
+
+Alias /placement %PUBLICWSGI%
+<Location /placement>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup placement-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/debs/cinder b/files/debs/cinder
index 3595e01..c1b79fd 100644
--- a/files/debs/cinder
+++ b/files/debs/cinder
@@ -3,3 +3,4 @@
 open-iscsi-utils # Deprecated since quantal dist:precise
 qemu-utils
 tgt # NOPRIME
+thin-provisioning-tools
diff --git a/files/debs/tls-proxy b/files/debs/tls-proxy
index dce9c07..5bd8e21 100644
--- a/files/debs/tls-proxy
+++ b/files/debs/tls-proxy
@@ -1 +1 @@
-stud
+apache2
diff --git a/files/rpms/general b/files/rpms/general
index ee2e8a0..77d2fa5 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -7,9 +7,9 @@
 gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
-iptables-services  # NOPRIME f22,f23,f24
+iptables-services  # NOPRIME f23,f24,f25
 java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f22,f23,f24
+java-1.8.0-openjdk-headless  # NOPRIME f23,f24,f25
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
diff --git a/files/rpms/nova b/files/rpms/nova
index 594393e..45f1c94 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,7 +7,7 @@
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f22,f23,f24
+kernel-modules # dist:f23,f24,f25
 kpartx
 kvm # NOPRIME
 libvirt-bin # NOPRIME
diff --git a/files/rpms/swift b/files/rpms/swift
index 1e05167..2f12df0 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -2,7 +2,7 @@
 liberasurecode-devel
 memcached
 pyxattr
-rsync-daemon # dist:f22,f23,f24
+rsync-daemon # dist:f23,f24,f25
 sqlite
 xfsprogs
 xinetd
diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf
index c670531..c49f716 100644
--- a/files/swift/rsyncd.conf
+++ b/files/swift/rsyncd.conf
@@ -4,76 +4,76 @@
 pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid
 address = 127.0.0.1
 
-[account6012]
+[account6612]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6012.lock
+lock file = %SWIFT_DATA_DIR%/run/account6612.lock
 
-[account6022]
+[account6622]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6022.lock
+lock file = %SWIFT_DATA_DIR%/run/account6622.lock
 
-[account6032]
+[account6632]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6032.lock
+lock file = %SWIFT_DATA_DIR%/run/account6632.lock
 
-[account6042]
+[account6642]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6042.lock
+lock file = %SWIFT_DATA_DIR%/run/account6642.lock
 
 
-[container6011]
+[container6611]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6011.lock
+lock file = %SWIFT_DATA_DIR%/run/container6611.lock
 
-[container6021]
+[container6621]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6021.lock
+lock file = %SWIFT_DATA_DIR%/run/container6621.lock
 
-[container6031]
+[container6631]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6031.lock
+lock file = %SWIFT_DATA_DIR%/run/container6631.lock
 
-[container6041]
+[container6641]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6041.lock
+lock file = %SWIFT_DATA_DIR%/run/container6641.lock
 
 
-[object6010]
+[object6613]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6010.lock
+lock file = %SWIFT_DATA_DIR%/run/object6613.lock
 
-[object6020]
+[object6623]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6020.lock
+lock file = %SWIFT_DATA_DIR%/run/object6623.lock
 
-[object6030]
+[object6633]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6030.lock
+lock file = %SWIFT_DATA_DIR%/run/object6633.lock
 
-[object6040]
+[object6643]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6040.lock
+lock file = %SWIFT_DATA_DIR%/run/object6643.lock
diff --git a/functions b/functions
index 46a7d41..6a0ac67 100644
--- a/functions
+++ b/functions
@@ -251,6 +251,7 @@
             image create \
             "$image_name" --public \
             --container-format=bare --disk-format=ploop \
+            --property hypervisor_type=vz \
             --property vm_mode=$vm_mode < "${image}"
         return
     fi
@@ -331,7 +332,7 @@
     fi
 
     if is_arch "aarch64"; then
-        img_property="--property hw_machine_type=virt --property hw_cdrom_bus=virtio --property os_command_line='console=ttyAMA0'"
+        img_property="--property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'"
     fi
 
     if [ "$container_format" = "bare" ]; then
@@ -636,6 +637,33 @@
     fi
 }
 
+
+# set_mtu - Set MTU on a device
+function set_mtu {
+    local dev=$1
+    local mtu=$2
+    sudo ip link set mtu $mtu dev $dev
+}
+
+
+# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling
+function enable_kernel_bridge_firewall {
+    # Load the bridge module. It provides firewalling for bridged frames and,
+    # on older kernels (pre-3.18), also the sysctl knobs used to
+    # enable/disable bridge firewalling
+    sudo modprobe bridge
+    # For newer kernels (3.18+), those sysctl settings are split into a separate
+    # kernel module (br_netfilter). Load it too, if present.
+    sudo modprobe br_netfilter 2>> /dev/null || :
+    # Enable bridge firewalling in case it's disabled in kernel (upstream
+    # default is enabled, but some distributions may decide to change it).
+    # This is at least needed for RHEL 7.2 and earlier releases.
+    for proto in arp ip ip6; do
+        sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1
+    done
+}
+
+
 # Restore xtrace
 $_XTRACE_FUNCTIONS
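A minimal sketch of how the new set_mtu and enable_kernel_bridge_firewall helpers might be used from a plugin; the device name and MTU value here are hypothetical:

    # stack.sh sources the functions file before running plugins, so the
    # helpers below are available from plugin code
    set_mtu br-ex 1450                # clamp the MTU of a hypothetical bridge
    enable_kernel_bridge_firewall     # let iptables see bridged traffic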
 
diff --git a/functions-common b/functions-common
index 3fdd71b..d15999e 100644
--- a/functions-common
+++ b/functions-common
@@ -534,10 +534,8 @@
                 echo "the project to the \$PROJECTS variable in the job definition."
                 die $LINENO "Cloning not allowed in this configuration"
             fi
-            git_timed clone $git_clone_flags $git_remote $git_dest
-            cd $git_dest
-            # This checkout syntax works for both branches and tags
-            git checkout $git_ref
+            # '--branch' can also take tags
+            git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
         elif [[ "$RECLONE" = "True" ]]; then
             # if it does exist then simulate what clone does if asked to RECLONE
             cd $git_dest
@@ -865,11 +863,9 @@
     domain_args=$(_get_domain_args $4 $5)
 
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
-        --column "ID" \
         --project $3 \
-        --column "Name" \
         $domain_args \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
@@ -878,11 +874,9 @@
             --user $2 \
             --project $3 \
             $domain_args
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
-            --column "ID" \
             --project $3 \
-            --column "Name" \
             $domain_args \
             | grep " $1 " | get_field 1)
     fi
@@ -894,22 +888,18 @@
 function get_or_add_user_domain_role {
     local user_role_id
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
-        --column "ID" \
         --domain $3 \
-        --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
         # Adds role to user and get it
         openstack role add $1 \
             --user $2 \
             --domain $3
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
-            --column "ID" \
             --domain $3 \
-            --column "Name" \
             | grep " $1 " | get_field 1)
     fi
     echo $user_role_id
@@ -920,13 +910,11 @@
 function get_or_add_user_domain_role {
     local user_role_id
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
         --os-url=$KEYSTONE_SERVICE_URI_V3 \
         --os-identity-api-version=3 \
-        --column "ID" \
         --domain $3 \
-        --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
         # Adds role to user and get it
@@ -935,13 +923,11 @@
             --domain $3 \
             --os-url=$KEYSTONE_SERVICE_URI_V3 \
             --os-identity-api-version=3
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
             --os-url=$KEYSTONE_SERVICE_URI_V3 \
             --os-identity-api-version=3 \
-            --column "ID" \
             --domain $3 \
-            --column "Name" \
             | grep " $1 " | get_field 1)
     fi
     echo $user_role_id
@@ -952,19 +938,19 @@
 function get_or_add_group_project_role {
     local group_role_id
     # Gets group role id
-    group_role_id=$(openstack role list \
+    group_role_id=$(openstack role assignment list \
         --group $2 \
         --project $3 \
-        -c "ID" -f value)
+        -f value)
     if [[ -z "$group_role_id" ]]; then
         # Adds role to group and get it
         openstack role add $1 \
             --group $2 \
             --project $3
-        group_role_id=$(openstack role list \
+        group_role_id=$(openstack role assignment list \
             --group $2 \
             --project $3 \
-            -c "ID" -f value)
+            -f value)
     fi
     echo $group_role_id
 }
@@ -1330,7 +1316,7 @@
     elif is_fedora; then
         sudo ${YUM:-yum} remove -y "$@" ||:
     elif is_suse; then
-        sudo zypper rm "$@" ||:
+        sudo zypper remove -y "$@" ||:
     else
         exit_distro_not_supported "uninstalling packages"
     fi
@@ -1346,20 +1332,26 @@
 
     time_start "yum_install"
 
-    # - We run with LC_ALL=C so string matching *should* be OK
-    # - Exit 1 if the failure might get better with a retry.
-    # - Exit 2 if it is fatal.
-    parse_yum_result='             \
-        BEGIN { result=0 }         \
-        /^YUM_FAILED/ { exit $2 }  \
-        /^No package/ { result=2 } \
-        /^Failed:/    { result=2 } \
-        //{ print }                \
+    # This is a bit tricky, because yum -y assumes missing or failed
+    # packages are OK (see [1]).  We want devstack to stop if it tries to
+    # install a package that is missing.
+    #
+    # Thus we manually match on the output (stack.sh runs in a fixed
+    # locale, so lang shouldn't change).
+    #
+    # If yum returns !0, we echo the result as "YUM_FAILED" and return
+    # that from the awk (we're subverting -e with this trick).
+    # Otherwise we use awk to look for failure strings and return "2"
+    # to indicate a terminal failure.
+    #
+    # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
+    parse_yum_result='              \
+        BEGIN { result=0 }          \
+        /^YUM_FAILED/ { result=$2 } \
+        /^No package/ { result=2 }  \
+        /^Failed:/    { result=2 }  \
+        //{ print }                 \
         END { exit result }'
-
-    # The manual check for missing packages is because yum -y assumes
-    # missing or failed packages are OK.
-    # See https://bugzilla.redhat.com/show_bug.cgi?id=965567
     (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
         | awk "$parse_yum_result" && result=$? || result=$?
 
@@ -1686,7 +1678,7 @@
     local logfile=$2
 
     if [[ "$USE_SCREEN" = "True" ]]; then
-        screen_process "$name" "sudo tail -f $logfile"
+        screen_process "$name" "sudo tail -f $logfile | sed 's/\\\\\\\\x1b/\o033/g'"
     fi
 }
 
@@ -1880,7 +1872,7 @@
             # white listed elements in tree. We want these to move out
             # over time as well, but they are in tree, so we need to
             # manage that.
-            local exceptions="60-ceph.sh 80-tempest.sh"
+            local exceptions="80-tempest.sh"
             local extra
             extra=$(basename $extra_plugin_file_name)
             if [[ ! ( $exceptions =~ "$extra" ) ]]; then
@@ -2207,6 +2199,18 @@
     echo ${1-0}.${2-0}.${3-0}.${4-0}
 }
 
+# Check if this is a valid ipv4 address string
+function is_ipv4_address {
+    local address=$1
+    local regex='([0-9]{1,3}\.){3}[0-9]{1,3}'
+    # TODO(clarkb) make this more robust
+    if [[ "$address" =~ $regex ]] ; then
+        return 0
+    else
+        return 1
+    fi
+}
+
 # Gracefully cp only if source file/dir exists
 # cp_it source destination
 function cp_it {
@@ -2254,6 +2258,14 @@
     echo $subnet
 }
 
+function is_provider_network {
+    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
+        return 0
+    fi
+    return 1
+}
+
+
 # Return the current python as "python<major>.<minor>"
 function python_version {
     local python_version
@@ -2304,6 +2316,17 @@
     fi
 }
 
+# Service wrapper to reload services
+# If the service is not running, it will be started instead
+# reload_service service-name
+function reload_service {
+    if [ -x /bin/systemctl ]; then
+        sudo /bin/systemctl reload-or-restart $1
+    else
+        sudo service $1 reload
+    fi
+}
+
 # Test with a finite retry loop.
 #
 function test_with_retry {
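For reference, a quick sketch of the new is_ipv4_address and reload_service helpers; the address and service name are illustrative only:

    # returns 0 for dotted-quad strings, 1 otherwise
    if is_ipv4_address "203.0.113.10"; then
        echo "service host is an IPv4 address"
    fi

    # reload-or-restart via systemd where available, otherwise 'service ... reload';
    # under systemd a stopped service is started rather than reloaded
    reload_service apache2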
diff --git a/inc/ini-config b/inc/ini-config
index 1f12343..68d48d1 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -274,6 +274,170 @@
     $xtrace
 }
 
+# Set a localrc var
+function localrc_set {
+    local file=$1
+    local group="local"
+    local conf="localrc"
+    local section=""
+    local option=$2
+    local value=$3
+    localconf_set "$file" "$group" "$conf" "$section" "$option" "$value"
+}
+
+# Check if local.conf has section.
+function localconf_has_section {
+    local file=$1
+    local group=$2
+    local conf=$3
+    local section=$4
+    local sep
+    sep=$(echo -ne "\x01")
+    local line
+    line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{
+        /\[${section}\]/p
+    }" "$file")
+    [ -n "$line" ]
+}
+
+# Check if local.conf has option.
+function localconf_has_option {
+    local file=$1
+    local group=$2
+    local conf=$3
+    local section=$4
+    local option=$5
+    local sep
+    sep=$(echo -ne "\x01")
+    local line
+    if [[ -z "$section" ]]; then
+        line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{
+            /${option}[ \t]*=.*$/p
+        }" "$file")
+    else
+        line=$(sed -ne "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{
+            /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/{
+                /${option}[ \t]*=.*$/p}
+        }" "$file")
+    fi
+    [ -n "$line" ]
+}
+
+# Update option in local.conf.
+function localconf_update_option {
+    local sudo=$1
+    local file=$2
+    local group=$3
+    local conf=$4
+    local section=$5
+    local option=$6
+    local value=$7
+    local sep
+    sep=$(echo -ne "\x01")
+    if [[ -z "$section" ]]; then
+        $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{
+            s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep}
+        }" "$file"
+    else
+        $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{
+            /\[${section}\]/,/\[\[.*\]\]\|\[.*\]/s${sep}^\(${option}[ \t]*=[ \t]*\).*\$${sep}\1${value}${sep}
+        }" "$file"
+    fi
+}
+
+# Add option in local.conf.
+function localconf_add_option {
+    local sudo=$1
+    local file=$2
+    local group=$3
+    local conf=$4
+    local section=$5
+    local option=$6
+    local value=$7
+    local sep
+    sep=$(echo -ne "\x01")
+    if [[ -z "$section" ]]; then
+        $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} a $option=$value" "$file"
+    else
+        $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep},\\${sep}\[\[.*\]\]${sep}{
+            /\[${section}\]/ a $option=$value
+        }" "$file"
+    fi
+}
+
+# Add section and option in local.conf.
+function localconf_add_section_and_option {
+    local sudo=$1
+    local file=$2
+    local group=$3
+    local conf=$4
+    local section=$5
+    local option=$6
+    local value=$7
+    local sep
+    sep=$(echo -ne "\x01")
+    $sudo sed -i -e "\\${sep}^\[\[${group}|${conf}\]\]${sep} {
+        a [$section]
+        a $option=$value
+    }" "$file"
+}
+
+# Set an option in a local.conf file.
+# localconf_set [-sudo] config-file group conf-name section option value
+#  - if the file does not exist, it is created
+function localconf_set {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    local sep
+    sep=$(echo -ne "\x01")
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="sudo "
+        shift
+    fi
+    local file=$1
+    local group=$2
+    local conf=$3
+    local section=$4
+    local option=$5
+    local value=$6
+
+    if [[ -z $group || -z $conf || -z $option || -z $value ]]; then
+        $xtrace
+        return
+    fi
+
+    if ! grep -q "^\[\[${group}|${conf}\]\]" "$file" 2>/dev/null; then
+        # Add meta section at the end if it does not exist
+        echo -e "\n[[${group}|${conf}]]" | $sudo tee --append "$file" > /dev/null
+        # Add section at the end
+        if [[ -n "$section" ]]; then
+            echo -e "[$section]" | $sudo tee --append "$file" > /dev/null
+        fi
+        # Add option at the end
+        echo -e "$option=$value" | $sudo tee --append "$file" > /dev/null
+    elif [[ -z "$section" ]]; then
+        if ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then
+            # Add option
+            localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+        else
+            # Replace it
+            localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+        fi
+    elif ! localconf_has_section "$file" "$group" "$conf" "$section"; then
+        # Add section and option in specified meta section
+        localconf_add_section_and_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+    elif ! localconf_has_option "$file" "$group" "$conf" "$section" "$option"; then
+        # Add option
+        localconf_add_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+    else
+        # Replace it
+        localconf_update_option "$sudo" "$file" "$group" "$conf" "$section" "$option" "$value"
+    fi
+    $xtrace
+}
+
 # Restore xtrace
 $INC_CONF_TRACE
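A minimal usage sketch for the new local.conf helpers; the file name and values are made up for illustration:

    # append or update a plain variable in the [[local|localrc]] meta section
    localrc_set local.conf ADMIN_PASSWORD secretadmin

    # set an option under a named section of a post-config meta section;
    # missing meta sections and sections are created as needed
    localconf_set local.conf post-config '$NOVA_CONF' DEFAULT debug True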
 
diff --git a/inc/meta-config b/inc/meta-config
index 6eb7a00..6252135 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -40,12 +40,10 @@
     $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile '
         BEGIN { group = "" }
         /^\[\[.+\|.*\]\]/ {
-            if (group == "") {
-                gsub("[][]", "", $1);
-                split($1, a, "|");
-                if (a[1] == matchgroup && a[2] == configfile) {
-                    group=a[1]
-                }
+            gsub("[][]", "", $1);
+            split($1, a, "|");
+            if (a[1] == matchgroup && a[2] == configfile) {
+                group=a[1]
             } else {
                 group=""
             }
diff --git a/inc/python b/inc/python
index e013dfa..e4cfab8 100644
--- a/inc/python
+++ b/inc/python
@@ -148,11 +148,15 @@
     fi
 
     $xtrace
+    # Setting SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep the
+    # same behaviour as setuptools before version 25.0.0.
+    # Related issue: https://github.com/pypa/pip/issues/3874
     $sudo_pip \
         http_proxy="${http_proxy:-}" \
         https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
+        SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
         $cmd_pip $upgrade \
         $@
     result=$?
@@ -366,7 +370,7 @@
 # Install python3 packages
 function install_python3 {
     if is_ubuntu; then
-        apt_get install python3.4 python3.4-dev
+        apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
     fi
 }
 
diff --git a/lib/apache b/lib/apache
index 2c84c7a..8a38cc4 100644
--- a/lib/apache
+++ b/lib/apache
@@ -39,27 +39,44 @@
     APACHE_NAME=apache2
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d}
 fi
+APACHE_LOG_DIR="/var/log/${APACHE_NAME}"
 
 # Functions
 # ---------
+
+# Enable an apache mod and restart apache if the mod isn't already enabled.
+function enable_apache_mod {
+    local mod=$1
+    # a2query/a2enmod are only available on Debian and SUSE style installs
+    if is_ubuntu || is_suse ; then
+        if ! a2query -m $mod ; then
+            sudo a2enmod $mod
+            restart_apache_server
+        fi
+    elif is_fedora; then
+        # pass
+        true
+    else
+        exit_distro_not_supported "apache enable mod"
+    fi
+}
+
 # install_apache_wsgi() - Install Apache server and wsgi module
 function install_apache_wsgi {
     # Apache installation, because we mark it NOPRIME
     if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
         install_package apache2 libapache2-mod-wsgi
-        # WSGI isn't enabled by default, enable it
-        sudo a2enmod wsgi
     elif is_fedora; then
         sudo rm -f /etc/httpd/conf.d/000-*
         install_package httpd mod_wsgi
     elif is_suse; then
         install_package apache2 apache2-mod_wsgi
-        # WSGI isn't enabled by default, enable it
-        sudo a2enmod wsgi
     else
-        exit_distro_not_supported "apache installation"
+        exit_distro_not_supported "apache wsgi installation"
     fi
+    # WSGI isn't enabled by default, enable it
+    enable_apache_mod wsgi
 
     # ensure mod_version enabled for <IfVersion ...>.  This is
     # built-in statically on anything recent, but precise (2.2)
@@ -192,6 +209,11 @@
     time_stop "restart_apache_server"
 }
 
+# reload_apache_server
+function reload_apache_server {
+    reload_service $APACHE_NAME
+}
+
 # Restore xtrace
 $_XTRACE_LIB_APACHE
 
diff --git a/lib/ceph b/lib/ceph
deleted file mode 100644
index e999647..0000000
--- a/lib/ceph
+++ /dev/null
@@ -1,382 +0,0 @@
-#!/bin/bash
-#
-# lib/ceph
-# Functions to control the configuration and operation of the **Ceph** storage service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
-
-# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
-#
-# - install_ceph
-# - configure_ceph
-# - init_ceph
-# - start_ceph
-# - stop_ceph
-# - cleanup_ceph
-
-# Save trace setting
-_XTRACE_LIB_CEPH=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
-# Default is the common DevStack data directory.
-CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
-CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
-
-# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
-# Default is ``/etc/ceph``.
-CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
-
-# DevStack will create a loop-back disk formatted as XFS to store the
-# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in
-# kilobytes.
-# Default is 1 gigabyte.
-CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
-CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
-
-# Common
-CEPH_FSID=$(uuidgen)
-CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
-
-# Glance
-GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
-GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
-GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
-GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
-
-# Nova
-NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
-NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
-NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
-
-# Cinder
-CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
-CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
-CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
-CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
-CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
-
-# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
-# configured for your Ceph cluster. By default we are configuring
-# only one replica since this is way less CPU and memory intensive. If
-# you are planning to test Ceph replication feel free to increase this value
-CEPH_REPLICAS=${CEPH_REPLICAS:-1}
-CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
-
-# Connect to an existing Ceph cluster
-REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
-REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
-
-# Cinder encrypted volume tests are not supported with a Ceph backend due to
-# bug 1463525.
-ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False
-
-
-# Functions
-# ------------
-
-function get_ceph_version {
-    local ceph_version_str
-    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
-    echo $ceph_version_str
-}
-
-# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
-# so it can connect to the Ceph cluster while attaching a Cinder block device
-function import_libvirt_secret_ceph {
-    cat > secret.xml <<EOF
-<secret ephemeral='no' private='no'>
-   <uuid>${CINDER_CEPH_UUID}</uuid>
-   <usage type='ceph'>
-     <name>client.${CINDER_CEPH_USER} secret</name>
-   </usage>
-</secret>
-EOF
-    sudo virsh secret-define --file secret.xml
-    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
-    sudo rm -f secret.xml
-}
-
-# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
-function undefine_virsh_secret {
-    if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid
-        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
-        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
-    fi
-}
-
-
-# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
-function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f22|f23|f24) ]]; then
-        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
-        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
-            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
-        fi
-        NO_UPDATE_REPOS=False
-    fi
-}
-
-# cleanup_ceph() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_ceph_remote {
-    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
-    if is_service_enabled glance; then
-        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled cinder; then
-        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled c-bak; then
-        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled nova; then
-        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
-        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-    fi
-}
-
-function cleanup_ceph_embedded {
-    sudo killall -w -9 ceph-mon
-    sudo killall -w -9 ceph-osd
-    sudo rm -rf ${CEPH_DATA_DIR}/*/*
-    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
-        sudo umount ${CEPH_DATA_DIR}
-    fi
-    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
-        sudo rm -f ${CEPH_DISK_IMAGE}
-    fi
-
-    # purge ceph config file and keys
-    sudo rm -rf ${CEPH_CONF_DIR}/*
-}
-
-function cleanup_ceph_general {
-    undefine_virsh_secret
-}
-
-
-# configure_ceph() - Set config files, create data dirs, etc
-function configure_ceph {
-    local count=0
-
-    # create a backing file disk
-    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
-
-    # populate ceph directory
-    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
-
-    # create ceph monitor initial key and directory
-    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \
-        --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
-        --cap mon 'allow *'
-    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
-
-    # create a default ceph configuration file
-    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
-[global]
-fsid = ${CEPH_FSID}
-mon_initial_members = $(hostname)
-mon_host = ${SERVICE_HOST}
-auth_cluster_required = cephx
-auth_service_required = cephx
-auth_client_required = cephx
-filestore_xattr_use_omap = true
-osd crush chooseleaf type = 0
-osd journal size = 100
-EOF
-
-    # bootstrap the ceph monitor
-    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
-        --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
-
-    if is_ubuntu; then
-        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
-        sudo initctl emit ceph-mon id=$(hostname)
-    else
-        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
-        sudo service ceph start mon.$(hostname)
-    fi
-
-    # wait for the admin key to come up otherwise we will not be able to do the actions below
-    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
-        echo_summary "Waiting for the Ceph admin key to be ready..."
-
-        count=$(($count + 1))
-        if [ $count -eq 3 ]; then
-            die $LINENO "Maximum of 3 retries reached"
-        fi
-        sleep 5
-    done
-
-    # pools data and metadata were removed in the Giant release so depending on the version we apply different commands
-    local ceph_version
-    ceph_version=$(get_ceph_version)
-    # change pool replica size according to the CEPH_REPLICAS set by the user
-    if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
-    else
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
-    fi
-
-    # create a simple rule to take OSDs instead of host with CRUSH
-    # then apply this rules to the default pool
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
-        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
-    fi
-
-    # create the OSD(s)
-    for rep in ${CEPH_REPLICAS_SEQ}; do
-        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
-        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
-        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
-            mon 'allow profile osd ' osd 'allow *' | \
-            sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
-
-        # ceph's init script is parsing ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ and looking for a file
-        # 'upstart' or 'sysinitv', thanks to these 'touches' we are able to control OSDs daemons
-        # from the init script.
-        if is_ubuntu; then
-            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
-        else
-            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
-        fi
-    done
-}
-
-function configure_ceph_embedded_glance {
-    # configure Glance service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_glance() - Glance config needs to come after Glance is set up
-function configure_ceph_glance {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
-        mon "allow r" \
-        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
-        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-
-    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
-    iniset $GLANCE_API_CONF glance_store default_store rbd
-    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
-    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
-    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
-    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
-}
-
-function configure_ceph_embedded_nova {
-    # configure Nova service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_nova() - Nova config needs to come after Nova is set up
-function configure_ceph_nova {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
-    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
-    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
-    iniset $NOVA_CONF libvirt inject_key false
-    iniset $NOVA_CONF libvirt inject_partition -2
-    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
-    iniset $NOVA_CONF libvirt images_type rbd
-    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
-    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
-
-    if ! is_service_enabled cinder; then
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
-            mon "allow r" \
-            osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
-            sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
-        sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-    fi
-}
-
-function configure_ceph_embedded_cinder {
-    # Configure Cinder service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
-function configure_ceph_cinder {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
-        mon "allow r" \
-        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
-        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-}
-
-# init_ceph() - Initialize databases, etc.
-function init_ceph {
-    # clean up from previous (possibly aborted) runs
-    # make sure to kill all ceph processes first
-    sudo pkill -f ceph-mon || true
-    sudo pkill -f ceph-osd || true
-}
-
-# install_ceph() - Collect source and prepare
-function install_ceph_remote {
-    install_package ceph-common
-}
-
-function install_ceph {
-    install_package ceph
-}
-
-# start_ceph() - Start running processes, including screen
-function start_ceph {
-    if is_ubuntu; then
-        sudo initctl emit ceph-mon id=$(hostname)
-        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
-            sudo start ceph-osd id=${id}
-        done
-    else
-        sudo service ceph start
-    fi
-}
-
-# stop_ceph() - Stop running processes (non-screen)
-function stop_ceph {
-    if is_ubuntu; then
-        sudo service ceph-mon-all stop > /dev/null 2>&1
-        sudo service ceph-osd-all stop > /dev/null 2>&1
-    else
-        sudo service ceph stop > /dev/null 2>&1
-    fi
-}
-
-
-# Restore xtrace
-$_XTRACE_LIB_CEPH
-
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/cinder b/lib/cinder
index 0ebf195..9ff74e8 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -39,7 +39,6 @@
 
 # set up default directories
 GITDIR["python-cinderclient"]=$DEST/python-cinderclient
-GITDIR["os-brick"]=$DEST/os-brick
 GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext
 CINDER_DIR=$DEST/cinder
 
@@ -71,6 +70,9 @@
 # What type of LVM device should Cinder use for LVM backend
 # Defaults to default, which is thick, the other valid choice
 # is thin, which as the name implies utilizes lvm thin provisioning.
+# Thinly provisioned LVM volumes may be more efficient when using the Cinder
+# image cache, but there are also known race failures with volume snapshots
+# and thinly provisioned LVM volumes; see bug 1642111 for details.
 CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default}
 
 # Default backends
@@ -129,6 +131,17 @@
 CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL}
 CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL}
 
+# Environment variables to configure the image-volume cache
+CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
+
+# If the size/count limits are left unset, cinder's default of 0 (unlimited) is used
+CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-}
+CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-}
+
+# Configure which cinder backends will have the image-volume cache; this takes
+# the same form as the CINDER_ENABLED_BACKENDS config option. By default the
+# cache is enabled for all cinder backends.
+CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
 
 # Functions
 # ---------
@@ -274,8 +287,6 @@
 
     iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME"
 
-    iniset $CINDER_CONF privsep_osbrick helper_command "sudo cinder-rootwrap \$rootwrap_config privsep-helper --config-file $CINDER_CONF"
-
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local enabled_backends=""
         local default_name=""
@@ -295,6 +306,7 @@
         if [[ -n "$default_name" ]]; then
             iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
         fi
+        configure_cinder_image_volume_cache
     fi
 
     if is_service_enabled swift; then
@@ -308,8 +320,8 @@
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
         iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
-
         iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
+        iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
     fi
 
     if [ "$SYSLOG" != "False" ]; then
@@ -400,6 +412,8 @@
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
+
+        configure_cinder_internal_tenant
     fi
 }
 
@@ -445,13 +459,6 @@
 
 # install_cinder() - Collect source and prepare
 function install_cinder {
-    # Install os-brick from git so we make sure we're testing
-    # the latest code.
-    if use_library_from_git "os-brick"; then
-        git_clone_by_name "os-brick"
-        setup_dev_lib "os-brick"
-    fi
-
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
     setup_develop $CINDER_DIR
     if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
@@ -544,7 +551,7 @@
 
     # Start proxies if enabled
     if is_service_enabled c-api && is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT &
+        start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
     fi
 }
 
@@ -584,6 +591,31 @@
     :
 }
 
+function configure_cinder_internal_tenant {
+    # Re-use the Cinder service account for simplicity.
+    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME)
+    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder")
+}
+
+function configure_cinder_image_volume_cache {
+    # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends
+    # in the same TYPE:NAME form as CINDER_ENABLED_BACKENDS, where NAME is
+    # the backend specific configuration stanza in cinder.conf.
+    for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do
+        local be_name=${be##*:}
+
+        iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED
+
+        if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then
+            iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB
+        fi
+
+        if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then
+            iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT
+        fi
+    done
+}
+
 
 # Restore xtrace
 $_XTRACE_CINDER
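A hedged localrc sketch for the new image-volume cache knobs; the backend name assumes the default lvm:lvmdriver-1 backend, so adjust it to match CINDER_ENABLED_BACKENDS:

    # these go into the [[local|localrc]] section of local.conf
    CINDER_IMG_CACHE_ENABLED=True
    # optional limits; leave unset to keep cinder's default of 0 (unlimited)
    CINDER_IMG_CACHE_SIZE_GB=10
    CINDER_IMG_CACHE_SIZE_COUNT=50
    # cache only on the lvm backend instead of every enabled backend
    CINDER_CACHE_ENABLED_FOR_BACKENDS=lvm:lvmdriver-1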
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 9bff5be..ba86ccf 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -45,7 +45,7 @@
 
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
-    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
     iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
     iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
     iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
@@ -66,7 +66,7 @@
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
         iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
-        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
         iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
         iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
         iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv
deleted file mode 100644
index e8b5da0..0000000
--- a/lib/cinder_backends/xiv
+++ /dev/null
@@ -1,86 +0,0 @@
-#!/bin/bash
-#
-# Copyright 2014 IBM Corp.
-# Copyright (c) 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-#
-# Authors:
-#   Alon Marx <alonma@il.ibm.com>
-#
-
-# lib/cinder_plugins/xiv
-# Configure the xiv_ds8k driver for xiv testing
-
-# Enable xiv_ds8k driver for xiv with:
-#
-#   CINDER_ENABLED_BACKENDS+=,xiv:<volume-type-name>
-#   XIV_DRIVER_VERSION=<version-string>
-#   SAN_IP=<storage-ip-or-hostname>
-#   SAN_LOGIN=<storage-admin-account>
-#   SAN_PASSWORD=<storage-admin-password>
-#   SAN_CLUSTERNAME=<cluster-name>
-#   CONNECTION_TYPE=<connection-type> iscsi|fc
-#   XIV_CHAP=<chap-type> disabled|enabled
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``cinder`` configurations
-
-# configure_cinder_backend_xiv - Configure Cinder for xiv backends
-
-# Save trace setting
-_XTRACE_CINDER_XIV=$(set +o | grep xtrace)
-set +o xtrace
-
-# Defaults
-# --------
-# Set up default directories
-
-
-# Entry Points
-# ------------
-
-# configure_cinder_backend_xiv - Set config files, create data dirs, etc
-function configure_cinder_backend_xiv {
-
-    local be_name=$1
-
-    python -c 'from xiv_ds8k_openstack.xiv_nova_proxy import XIVNovaProxy'
-    if [ $? -ne 0 ]; then
-        die $LINENO "XIV_DS8K driver is missing. Please install first"
-    fi
-
-    # For reference:
-    # ``XIV_DS8K_BACKEND='IBM-XIV_'${SAN_IP}'_'${SAN_CLUSTERNAME}'_'${CONNECTION_TYPE}``
-    iniset $CINDER_CONF DEFAULT xiv_ds8k_driver_version $XIV_DRIVER_VERSION
-
-    iniset $CINDER_CONF $be_name san_ip $SAN_IP
-    iniset $CINDER_CONF $be_name san_login $SAN_LOGIN
-    iniset $CINDER_CONF $be_name san_password $SAN_PASSWORD
-    iniset $CINDER_CONF $be_name san_clustername $SAN_CLUSTERNAME
-    iniset $CINDER_CONF $be_name xiv_ds8k_connection_type $CONNECTION_TYPE
-    iniset $CINDER_CONF $be_name volume_backend_name $be_name
-    iniset $CINDER_CONF $be_name volume_driver 'cinder.volume.drivers.ibm.xiv_ds8k.XIVDS8KDriver'
-    iniset $CINDER_CONF $be_name xiv_ds8k_proxy 'xiv_ds8k_openstack.xiv_nova_proxy.XIVNovaProxy'
-    iniset $CINDER_CONF $be_name xiv_chap $XIV_CHAP
-}
-
-# Restore xtrace
-$_XTRACE_CINDER_XIV
-
-# Local variables:
-# mode: shell-script
-# End:
diff --git a/lib/glance b/lib/glance
index 8d95aad..5259174 100644
--- a/lib/glance
+++ b/lib/glance
@@ -187,8 +187,6 @@
 
         iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
         iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
         iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
 
         # commenting is not strictly necessary but it's confusing to have bad values in conf
@@ -312,6 +310,11 @@
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
+
+        # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999
+        service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME)
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id
     fi
 
     # Add glance-glare service and endpoints
@@ -383,8 +386,8 @@
 function start_glance {
     local service_protocol=$GLANCE_SERVICE_PROTOCOL
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT &
-        start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT &
+        start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
+        start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT
     fi
 
     run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
diff --git a/lib/heat b/lib/heat
index c841e0a..0863128 100644
--- a/lib/heat
+++ b/lib/heat
@@ -40,7 +40,6 @@
 HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
 HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
 OCC_DIR=$DEST/os-collect-config
-DIB_UTILS_DIR=$DEST/dib-utils
 ORC_DIR=$DEST/os-refresh-config
 OAC_DIR=$DEST/os-apply-config
 
@@ -276,7 +275,6 @@
     git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
     git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
     git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
-    git_clone $DIB_UTILS_REPO $DIB_UTILS_DIR $DIB_UTILS_BRANCH
 }
 
 # start_heat() - Start running processes, including screen
@@ -420,7 +418,7 @@
 
 # build_heat_pip_mirror() - Build a pip mirror containing heat agent projects
 function build_heat_pip_mirror {
-    local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR $DIB_UTILS_DIR"
+    local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
     local projpath proj package
 
     rm -rf $HEAT_PIP_REPO
diff --git a/lib/horizon b/lib/horizon
index 0517e32..c0faed7 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -69,9 +69,8 @@
 # cleanup_horizon() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_horizon {
-    local horizon_conf
-    horizon_conf=$(apache_site_config_for horizon)
-    sudo rm -f $horizon_conf
+    disable_apache_site horizon
+    sudo rm -f $(apache_site_config_for horizon)
 }
 
 # configure_horizon() - Set config files, create data dirs, etc
@@ -82,7 +81,7 @@
     # Horizon is installed as develop mode, so we can compile here.
     # Message catalog compilation is handled by Django admin script,
     # so compiling them after the installation avoids Django installation twice.
-    (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages)
+    (cd $HORIZON_DIR; python manage.py compilemessages)
 
     # ``local_settings.py`` is used to override horizon default settings.
     local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
@@ -98,6 +97,11 @@
     _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
 
+    # note(trebskit): if HOST_IP points at a non-localhost ip address, horizon cannot be
+    # accessed from outside the virtual machine. This fix is meant primarily for local
+    # development purposes.
+    _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"]
+
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
     fi
diff --git a/lib/keystone b/lib/keystone
index 6198e43..948d5b4 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -25,7 +25,6 @@
 # - create_keystone_accounts
 # - stop_keystone
 # - cleanup_keystone
-# - _cleanup_keystone_apache_wsgi
 
 # Save trace setting
 _XTRACE_KEYSTONE=$(set +o | grep xtrace)
@@ -52,9 +51,6 @@
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 
-# NOTE(sdague): remove in Newton
-KEYSTONE_CATALOG_BACKEND="sql"
-
 # Toggle for deploying Keystone under HTTPD + mod_wsgi
 # Deprecated in Mitaka, use KEYSTONE_DEPLOY instead.
 KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
@@ -89,7 +85,7 @@
 
 # Select Keystone's token provider (and format)
 # Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
-KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-}
+KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
 # Set Keystone interface configuration
@@ -124,7 +120,7 @@
 # complete URIs
 if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
     # If running in Apache, use path access rather than port.
-    KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_v2_admin
+    KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_admin
     KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity
 else
     KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT}
@@ -149,11 +145,7 @@
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_keystone {
-    _cleanup_keystone_apache_wsgi
-}
-
-# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_keystone_apache_wsgi {
+    disable_apache_site keystone
     sudo rm -f $(apache_site_config_for keystone)
 }
 
@@ -226,13 +218,6 @@
         iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
         iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
         iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
-        iniset $KEYSTONE_CONF ldap use_dumb_member "True"
-        iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
-        iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
-        iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
-        iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
-        iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
-        iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
         iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
         iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
         iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
@@ -245,12 +230,9 @@
 
     # Enable caching
     iniset $KEYSTONE_CONF cache enabled "True"
-    iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool"
+    iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
     iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
 
-    # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617
-    iniset $KEYSTONE_CONF catalog caching "False"
-
     iniset_rpc_backend keystone $KEYSTONE_CONF
 
     # Register SSL certificates if provided
@@ -338,6 +320,8 @@
             iniset "$file" uwsgi buffer-size 65535
             # Make sure the client doesn't try to re-use the connection.
             iniset "$file" uwsgi add-header "Connection: close"
+            # This ensures that file descriptors aren't shared between processes.
+            iniset "$file" uwsgi lazy-apps true
         done
     fi
 
@@ -345,11 +329,13 @@
 
     iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/"
 
+    iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/"
+
     # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project.
     # The users from this project are globally admin as before, but it also
     # allows policy changes in order to clarify the adminess scope.
-    iniset $KEYSTONE_CONF resource admin_project_domain_name Default
-    iniset $KEYSTONE_CONF resource admin_project_name admin
+    #iniset $KEYSTONE_CONF resource admin_project_domain_name Default
+    #iniset $KEYSTONE_CONF resource admin_project_name admin
 }
 
 # create_keystone_accounts() - Sets up common required keystone accounts
@@ -514,6 +500,9 @@
         rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
         $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup
     fi
+    rm -rf "$KEYSTONE_CONF_DIR/credential-keys/"
+    $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF credential_setup
+
 }
 
 # install_keystoneauth() - Collect source and prepare
@@ -609,8 +598,8 @@
 
     # Start proxies if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT &
-        start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT &
+        start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
+        start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
     fi
 
     # (re)start memcached to make sure we have a clean memcache.
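
The keystone hunks above flip the default token provider to fernet, add a credential key repository alongside the fernet one, and give the TLS proxies named identifiers. For a deployment that wants a different provider, a minimal local.conf sketch (variable name and accepted values taken from the comment near the top of this file's diff):

    [[local|localrc]]
    # 'uuid', 'pki', 'pkiz', or 'fernet'; fernet is now the default
    KEYSTONE_TOKEN_FORMAT=uuid
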
diff --git a/lib/lvm b/lib/lvm
index b9d7c39..99c7ba9 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -23,11 +23,7 @@
 # Defaults
 # --------
 # Name of the lvm volume groups to use/create for iscsi volumes
-# This monkey-motion is for compatibility with icehouse-generation Grenade
-# If ``VOLUME_GROUP`` is set, use it, otherwise we'll build a VG name based
-# on ``VOLUME_GROUP_NAME`` that includes the backend name
-# Grenade doesn't use ``VOLUME_GROUP2`` so it is left out
-VOLUME_GROUP_NAME=${VOLUME_GROUP:-${VOLUME_GROUP_NAME:-stack-volumes}}
+VOLUME_GROUP_NAME=${VOLUME_GROUP_NAME:-stack-volumes}
 DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default
 
 # Backing file name is of the form $VOLUME_GROUP$BACKING_FILE_SUFFIX
@@ -58,7 +54,9 @@
     if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
         local vg_dev
         vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
-        sudo losetup -d $vg_dev
+        if [[ -n "$vg_dev" ]]; then
+            sudo losetup -d $vg_dev
+        fi
         rm -f $backing_file
     fi
 }
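
The guard above matters because losetup -j prints nothing when the backing file is not attached to any loop device, so the old unconditional losetup -d received an empty argument and errored out. A sketch of the same pattern with an illustrative backing-file path (the real name is derived from VOLUME_GROUP_NAME and BACKING_FILE_SUFFIX):

    backing_file=/opt/stack/data/stack-volumes-default-backing-file  # illustrative path
    vg_dev=$(sudo losetup -j $backing_file | awk -F':' '{ print $1 }')
    if [[ -n "$vg_dev" ]]; then
        sudo losetup -d $vg_dev
    fi
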
diff --git a/lib/neutron b/lib/neutron
index ad68d8e..d30e185 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -47,10 +47,10 @@
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # By default, use the ML2 plugin
-NEUTRON_PLUGIN=${NEUTRON_PLUGIN:-ml2}
-NEUTRON_PLUGIN_CONF_FILENAME=${NEUTRON_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
-NEUTRON_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_PLUGIN
-NEUTRON_PLUGIN_CONF=$NEUTRON_PLUGIN_CONF_PATH/$NEUTRON_PLUGIN_CONF_FILENAME
+NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
+NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
+NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN
+NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME
 
 NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent}
 NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent}
@@ -117,16 +117,16 @@
 
     configure_neutron_rootwrap
 
-    mkdir -p $NEUTRON_PLUGIN_CONF_PATH
+    mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH
 
-    cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_PLUGIN/$NEUTRON_PLUGIN_CONF_FILENAME.sample $NEUTRON_PLUGIN_CONF
+    cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF
 
     iniset $NEUTRON_CONF database connection `database_connection_url neutron`
     iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH
     iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock
     iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
 
-    iniset $NEUTRON_CONF DEFAULT debug True
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
     iniset_rpc_backend neutron $NEUTRON_CONF
 
@@ -139,7 +139,7 @@
 
         cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
 
-        iniset $NEUTRON_CONF DEFAULT core_plugin ml2
+        iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
 
         iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
         iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
@@ -147,10 +147,6 @@
         iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
         configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken
 
-        # Configuration for neutron notifations to nova.
-        iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
-        iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-
         iniset $NEUTRON_CONF nova auth_type password
         iniset $NEUTRON_CONF nova auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
         iniset $NEUTRON_CONF nova username nova
@@ -162,32 +158,40 @@
 
         # Configure VXLAN
         # TODO(sc68cal) not hardcode?
-        iniset $NEUTRON_PLUGIN_CONF ml2 tenant_network_types vxlan
-        iniset $NEUTRON_PLUGIN_CONF ml2 type_drivers vxlan
-        iniset $NEUTRON_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
-        iniset $NEUTRON_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 type_drivers vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
+        if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security
+        fi
     fi
 
     # Neutron OVS or LB agent
     if is_service_enabled neutron-agent; then
-        iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan
-        iniset $NEUTRON_PLUGIN_CONF DEFAULT debug True
+        iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
         # Configure the neutron agent
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_PLUGIN_CONF securitygroup iptables
-            iniset $NEUTRON_PLUGIN_CONF vxlan local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables
+            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
         else
-            iniset $NEUTRON_PLUGIN_CONF securitygroup iptables_hybrid
-            iniset $NEUTRON_PLUGIN_CONF ovs local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables_hybrid
+            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
         fi
+
+        enable_kernel_bridge_firewall
     fi
 
     # DHCP Agent
     if is_service_enabled neutron-dhcp; then
         cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF
 
-        iniset $NEUTRON_DHCP_CONF DEFAULT debug True
+        iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        # Make sure guests get working DNS resolution
+        iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
+
         iniset $NEUTRON_DHCP_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
         iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
         neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
@@ -196,9 +200,9 @@
     if is_service_enabled neutron-l3; then
         cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
         iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        iniset $NEUTRON_CONF DEFAULT service_plugins router
+        neutron_service_plugin_class_add router
         iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
-        iniset $NEUTRON_L3_CONF DEFAULT debug True
+        iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
     fi
 
@@ -206,7 +210,7 @@
     if is_service_enabled neutron-metadata-agent; then
         cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
 
-        iniset $NEUTRON_META_CONF DEFAULT debug True
+        iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST
         iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
 
@@ -245,8 +249,8 @@
         source $TOP_DIR/lib/neutron_plugins/services/metering
         neutron_agent_metering_configure_common
         neutron_agent_metering_configure_agent
+        neutron_service_plugin_class_add metering
     fi
-
 }
 
 # configure_neutron_rootwrap() - configure Neutron's rootwrap
@@ -295,6 +299,9 @@
 
     iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
 
+    # Optionally let the network plugin set options in $NOVA_CONF
+    neutron_plugin_create_nova_conf
+
     if is_service_enabled neutron-metadata-agent; then
         iniset $NOVA_CONF neutron service_metadata_proxy "True"
     fi
@@ -384,7 +391,7 @@
 
     # Start the Neutron service
     # TODO(sc68cal) Stop hard coding this
-    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_PLUGIN_CONF"
+    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF"
 
     if is_ssl_enabled_service "neutron"; then
         ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
@@ -399,7 +406,7 @@
 
     # Start proxy if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT &
+        start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
     fi
 }
 
@@ -419,6 +426,8 @@
     fi
     if is_service_enabled neutron-l3; then
         run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG"
+    fi
+    if is_service_enabled neutron-api; then
         # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
         # of the code in lib/neutron_plugins/services/l3
         if type -p neutron_plugin_create_initial_networks > /dev/null; then
@@ -462,9 +471,9 @@
 
     NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF"
 
-    #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_PLUGIN_CONF (ml2_conf.ini) but others may not
+    #TODO(sc68cal) The OVS and LB agents use settings in NEUTRON_CORE_PLUGIN_CONF (ml2_conf.ini) but others may not
     if is_service_enabled neutron-agent; then
-        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_PLUGIN_CONF"
+        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CORE_PLUGIN_CONF"
     fi
 
     if is_service_enabled neutron-dhcp; then
@@ -481,6 +490,16 @@
 
 }
 
+# neutron_service_plugin_class_add_new() - append a service plugin class to service_plugins
+function neutron_service_plugin_class_add_new {
+    local service_plugin_class=$1
+    local plugins=""
+
+    plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
+    plugins+=",${service_plugin_class}"
+    iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
+}
+
 # Dispatch functions
 # These are needed for compatibility between the old and new implementations
 # where there are function name overlaps.  These will be removed when
@@ -540,6 +559,15 @@
     fi
 }
 
+function neutron_service_plugin_class_add {
+    if is_neutron_legacy_enabled; then
+        # Call back to old function
+        _neutron_service_plugin_class_add "$@"
+    else
+        neutron_service_plugin_class_add_new "$@"
+    fi
+}
+
 function start_neutron {
     if is_neutron_legacy_enabled; then
         # Call back to old function
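
The new neutron_service_plugin_class_add helper appends a class to service_plugins via iniget/iniset, with the dispatcher falling back to the legacy _neutron_service_plugin_class_add when lib/neutron-legacy is in use; the hunks above already call it for router and metering. A hedged usage sketch for an out-of-tree plugin, where "qos" is only an illustrative plugin alias:

    # after sourcing lib/neutron from a plugin's plugin.sh
    neutron_service_plugin_class_add qos
    # [DEFAULT] service_plugins in $NEUTRON_CONF now has ",qos" appended
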
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index dca2e98..613e0f1 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -24,11 +24,9 @@
 # - check_neutron_third_party_integration
 # - start_neutron_agents
 # - create_neutron_initial_network
-# - setup_neutron_debug
 #
 # ``unstack.sh`` calls the entry points in this order:
 #
-# - teardown_neutron_debug
 # - stop_neutron
 # - stop_neutron_third_party
 # - cleanup_neutron
@@ -74,7 +72,6 @@
 
 NEUTRON_DIR=$DEST/neutron
 NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # Support entry points installation of console scripts
@@ -88,9 +85,6 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
-# Default provider for load balancer service
-DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-
 # Agent binaries.  Note, binary paths for other agents are set in per-service
 # scripts in lib/neutron_plugins/services/
 AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -101,7 +95,6 @@
 # loaded from per-plugin  scripts in lib/neutron_plugins/
 Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
 Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
-Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini
 Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
 
 # Default name for Neutron database
@@ -130,8 +123,6 @@
 Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-# The name of the default q-l3 router
-Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
 Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
 VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
@@ -204,7 +195,7 @@
 # agent, as described below.
 #
 # Example: ``PHYSICAL_NETWORK=default``
-PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
 
 # With the openvswitch agent, if using VLANs for tenant networks,
 # or if using flat or VLAN provider networks, set in ``localrc`` to
@@ -214,15 +205,17 @@
 # port for external connectivity.
 #
 # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
-OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
+OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
 
+default_route_dev=$(ip route | grep ^default | awk '{print $5}')
+die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
 # With the linuxbridge agent, if using VLANs for tenant networks,
 # or if using flat or VLAN provider networks, set in ``localrc`` to
 # the name of the network interface to use for the physical
 # network.
 #
 # Example: ``LB_PHYSICAL_INTERFACE=eth1``
-LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev}
 
 # When Neutron tunnels are enabled it is needed to specify the
 # IP address of the end point in the local server. This IP is set
@@ -253,25 +246,14 @@
     source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
 fi
 
-# Agent loadbalancer service plugin functions
-# -------------------------------------------
-
-# Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/neutron_plugins/services/loadbalancer
-
 # Agent metering service plugin functions
 # -------------------------------------------
 
 # Hardcoding for 1 service plugin for now
 source $TOP_DIR/lib/neutron_plugins/services/metering
 
-# Firewall Service Plugin functions
-# ---------------------------------
-source $TOP_DIR/lib/neutron_plugins/services/firewall
-
 # L3 Service functions
 source $TOP_DIR/lib/neutron_plugins/services/l3
-
 # Use security group or not
 if has_neutron_plugin_security_group; then
     Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
@@ -298,9 +280,6 @@
 
 function _determine_config_l3 {
     local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
-    if is_service_enabled q-fwaas; then
-        opts+=" --config-file $Q_FWAAS_CONF_FILE"
-    fi
     echo "$opts"
 }
 
@@ -325,17 +304,9 @@
     iniset_rpc_backend neutron $NEUTRON_CONF
 
     # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
-    if is_service_enabled q-lbaas; then
-        deprecated "Configuring q-lbaas through devstack is deprecated"
-        _configure_neutron_lbaas
-    fi
     if is_service_enabled q-metering; then
         _configure_neutron_metering
     fi
-    if is_service_enabled q-fwaas; then
-        deprecated "Configuring q-fwaas through devstack is deprecated"
-        _configure_neutron_fwaas
-    fi
     if is_service_enabled q-agt q-svc; then
         _configure_neutron_service
     fi
@@ -431,14 +402,6 @@
 
     git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
     setup_develop $NEUTRON_DIR
-    if is_service_enabled q-fwaas; then
-        git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH
-        setup_develop $NEUTRON_FWAAS_DIR
-    fi
-    if is_service_enabled q-lbaas; then
-        git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
-        setup_develop $NEUTRON_LBAAS_DIR
-    fi
 
     if [ "$VIRT_DRIVER" == 'xenserver' ]; then
         local dom0_ip
@@ -469,10 +432,6 @@
     if is_service_enabled q-agt q-dhcp q-l3; then
         neutron_plugin_install_agent_packages
     fi
-
-    if is_service_enabled q-lbaas; then
-        neutron_agent_lbaas_install_agent_packages
-    fi
 }
 
 # Start running processes, including screen
@@ -499,7 +458,7 @@
 
     # Start proxy if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT &
+        start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
     fi
 }
 
@@ -533,7 +492,6 @@
     fi
 
     run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
-    run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $LBAAS_AGENT_CONF_FILENAME"
     run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
@@ -573,12 +531,6 @@
         stop_process q-meta
     fi
 
-    if is_service_enabled q-lbaas; then
-        neutron_lbaas_stop
-    fi
-    if is_service_enabled q-fwaas; then
-        neutron_fwaas_stop
-    fi
     if is_service_enabled q-metering; then
         neutron_metering_stop
     fi
@@ -610,7 +562,7 @@
         # on configure we will also add $from_intf as a port on $to_intf,
         # assuming it is an OVS bridge.
 
-        local IP_ADD=""
+        local IP_REPLACE=""
         local IP_DEL=""
         local IP_UP=""
         local DEFAULT_ROUTE_GW
@@ -635,7 +587,7 @@
 
         if [[ "$IP_BRD" != "" ]]; then
             IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
-            IP_ADD="sudo ip addr add $IP_BRD dev $to_intf"
+            IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
             IP_UP="sudo ip link set $to_intf up"
             if [[ "$af" == "inet" ]]; then
                 IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
@@ -645,7 +597,7 @@
 
         # The add/del OVS port calls have to happen either before or
         # after the address is moved in order to not leave it orphaned.
-        $DEL_OVS_PORT; $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
+        $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
     fi
 }
 
@@ -682,7 +634,7 @@
     fi
 
     # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
+    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
         sudo ip netns delete ${ns}
     done
 }
@@ -799,6 +751,8 @@
     cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
 
     iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    # Make sure guests get working DNS resolution
+    iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
     iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
     if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
         iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
@@ -817,7 +771,7 @@
 
     _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
 
-    neutron_plugin_configure_dhcp_agent
+    neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
 }
 
 
@@ -836,31 +790,11 @@
     iniset $NEUTRON_CONF oslo_messaging_notifications driver messaging
 }
 
-function _configure_neutron_lbaas {
-    # Uses oslo config generator to generate LBaaS sample configuration files
-    (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample ]; then
-        cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_CONF_DIR/neutron_lbaas.conf
-        iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers service_provider $DEFAULT_LB_PROVIDER
-    fi
-    neutron_agent_lbaas_configure_common
-    neutron_agent_lbaas_configure_agent
-}
-
 function _configure_neutron_metering {
     neutron_agent_metering_configure_common
     neutron_agent_metering_configure_agent
 }
 
-function _configure_neutron_fwaas {
-    if [ -f $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf ]; then
-        cp $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf $NEUTRON_CONF_DIR
-    fi
-    neutron_fwaas_configure_common
-    neutron_fwaas_configure_driver
-}
-
 function _configure_dvr {
     iniset $NEUTRON_CONF DEFAULT router_distributed True
     iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
@@ -1028,55 +962,6 @@
     test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
 }
 
-# Neutron 3rd party programs
-#---------------------------
-
-# please refer to ``lib/neutron_thirdparty/README.md`` for details
-NEUTRON_THIRD_PARTIES=""
-for f in $TOP_DIR/lib/neutron_thirdparty/*; do
-    third_party=$(basename $f)
-    if is_service_enabled $third_party; then
-        source $TOP_DIR/lib/neutron_thirdparty/$third_party
-        NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party"
-    fi
-done
-
-function _neutron_third_party_do {
-    for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do
-        ${1}_${third_party}
-    done
-}
-
-# configure_neutron_third_party() - Set config files, create data dirs, etc
-function configure_neutron_third_party {
-    _neutron_third_party_do configure
-}
-
-# init_neutron_third_party() - Initialize databases, etc.
-function init_neutron_third_party {
-    _neutron_third_party_do init
-}
-
-# install_neutron_third_party() - Collect source and prepare
-function install_neutron_third_party {
-    _neutron_third_party_do install
-}
-
-# start_neutron_third_party() - Start running processes, including screen
-function start_neutron_third_party {
-    _neutron_third_party_do start
-}
-
-# stop_neutron_third_party - Stop running processes (non-screen)
-function stop_neutron_third_party {
-    _neutron_third_party_do stop
-}
-
-# check_neutron_third_party_integration() - Check that third party integration is sane
-function check_neutron_third_party_integration {
-    _neutron_third_party_do check
-}
-
 # Restore xtrace
 $_XTRACE_NEUTRON
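
Two behavioural notes fall out of the hunks above: ip addr replace is idempotent where ip addr add failed on re-runs, and the new default_route_dev lookup aborts early when the host has no default route. The lookup itself is just:

    # With "default via 192.0.2.1 dev eth0" in the routing table (illustrative),
    # this prints "eth0"; die_if_not_set then aborts if nothing was found.
    ip route | grep ^default | awk '{print $5}'
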
 
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 0a06635..d0de2f5 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -44,14 +44,14 @@
 
 function neutron_plugin_configure_dhcp_agent {
     local conf_file=$1
-    iniset $conf_file DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+    :
 }
 
 function neutron_plugin_configure_l3_agent {
     local conf_file=$1
     sudo brctl addbr $PUBLIC_BRIDGE
+    set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU
     iniset $conf_file DEFAULT external_network_bridge
-    iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function neutron_plugin_configure_plugin_agent {
@@ -61,11 +61,15 @@
     if [[ "$LB_INTERFACE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$LB_PHYSICAL_INTERFACE" != "" ]]; then
         LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
     fi
+    if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then
+        iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE"
+    fi
     if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS
     fi
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+        enable_kernel_bridge_firewall
     else
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 2ece210..e429714 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -35,7 +35,11 @@
 Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES}
 # List of extension drivers to load, use '-' instead of ':-' to allow people to
 # explicitly override this to blank
-Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+else
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-}
+fi
 
 # L3 Plugin to load for ML2
 # For some flat network environment, they not want to extend L3 plugin.
@@ -95,8 +99,16 @@
 
 
     # Allow for setup the flat type network
-    if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" && -n "$PHYSICAL_NETWORK" ]]; then
-            Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$PHYSICAL_NETWORK"
+    if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" ]]; then
+        if [[ -n "$PHYSICAL_NETWORK" || -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then
+            Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks="
+            if [[ -n "$PHYSICAL_NETWORK" ]]; then
+                Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PHYSICAL_NETWORK},"
+            fi
+            if [[ -n "$PUBLIC_PHYSICAL_NETWORK" ]]; then
+                Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS+="${PUBLIC_PHYSICAL_NETWORK},"
+            fi
+        fi
     fi
     # REVISIT(rkukura): Setting firewall_driver here for
     # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is
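
The rewritten flat-network block above concatenates both physical networks when they are set. For example, with PHYSICAL_NETWORK=default and PUBLIC_PHYSICAL_NETWORK=public (illustrative values), it produces:

    Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=default,public,"

The trailing comma comes from the "," appended after each network name.
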
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 69e38f4..e27b8a6 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -29,13 +29,12 @@
 
 function neutron_plugin_configure_dhcp_agent {
     local conf_file=$1
-    iniset $conf_file DEFAULT dhcp_agent_manager neutron.agent.dhcp_agent.DhcpAgentWithStateReport
+    :
 }
 
 function neutron_plugin_configure_l3_agent {
     local conf_file=$1
     _neutron_ovs_base_configure_l3_agent
-    iniset $conf_file DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport
 }
 
 function neutron_plugin_configure_plugin_agent {
@@ -104,7 +103,7 @@
         sudo ovs-vsctl -- --may-exist add-port "br-$VLAN_INTERFACE" $VLAN_INTERFACE
 
         # Create external bridge and add port
-        _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
+        _neutron_ovs_base_add_public_bridge
         sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE
 
         # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index ecf252f..baf7d7f 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -19,7 +19,7 @@
 
 function _neutron_ovs_base_add_bridge {
     local bridge=$1
-    local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge"
+    local addbr_cmd="sudo ovs-vsctl -- --may-exist add-br $bridge"
 
     if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then
         addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}"
@@ -83,9 +83,10 @@
 
 function _neutron_ovs_base_configure_firewall_driver {
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid
+        enable_kernel_bridge_firewall
     else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop
     fi
 }
 
@@ -105,11 +106,16 @@
         sudo ip link set $Q_PUBLIC_VETH_EX up
         sudo ip addr flush dev $Q_PUBLIC_VETH_EX
     else
-        _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
+        _neutron_ovs_base_add_public_bridge
         sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE
     fi
 }
 
+function _neutron_ovs_base_add_public_bridge {
+    _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE
+    set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU
+}
+
 function _neutron_ovs_base_configure_nova_vif_driver {
     :
 }
diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall
deleted file mode 100644
index 40968fa..0000000
--- a/lib/neutron_plugins/services/firewall
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-# Neutron firewall plugin
-# ---------------------------
-
-# Save trace setting
-_XTRACE_NEUTRON_FIREWALL=$(set +o | grep xtrace)
-set +o xtrace
-
-FWAAS_PLUGIN=${FWAAS_PLUGIN:-neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin}
-FWAAS_DRIVER=${FWAAS_DRIVER:-neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver}
-
-function neutron_fwaas_configure_common {
-    _neutron_service_plugin_class_add $FWAAS_PLUGIN
-}
-
-function neutron_fwaas_configure_driver {
-    # Uses oslo config generator to generate FWaaS sample configuration files
-    (cd $NEUTRON_FWAAS_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini
-    cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini.sample $FWAAS_DRIVER_CONF_FILENAME
-
-    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True
-    iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "$FWAAS_DRIVER"
-}
-
-function neutron_fwaas_stop {
-    :
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON_FIREWALL
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 4ce87bd..569a366 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -15,6 +15,15 @@
 IPV6_PROVIDER_NETWORK_GATEWAY=${IPV6_PROVIDER_NETWORK_GATEWAY:-}
 
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
+PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
+
+# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public
+# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is
+# used.
+Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True}
+
+# The name of the default router
+Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 
 # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of
 # PUBLIC_BRIDGE.  This is intended to be used with
@@ -50,7 +59,7 @@
 #    Q_USE_PROVIDERNET_FOR_PUBLIC=True
 #    PUBLIC_PHYSICAL_NETWORK=public
 #    OVS_BRIDGE_MAPPINGS=public:br-ex
-Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False}
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
 PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
 
 # Generate 40-bit IPv6 Global ID to comply with RFC 4193
@@ -61,27 +70,36 @@
 IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac}
 IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet}
 IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet}
-FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64}
-IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1}
+IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56}
+# If the safe-to-use range is larger than a /64, use only its first /64 to
+# avoid the side effects outlined in RFC 7421
+FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }')}
+IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-}
 IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64}
 IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2}
 IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1}
 
 # Gateway and subnet defaults, in case they are not customized in localrc
-NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-}
+PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-}
 PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
 PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
 
 # Subnetpool defaults
+USE_SUBNETPOOL=${USE_SUBNETPOOL:-True}
 SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"}
 
-SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/8}
-SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48}
+SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}
+SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}
 
-SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24}
+SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26}
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
+default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
+die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
+
+default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}')
+
 function _determine_config_l3 {
     local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
     echo "$opts"
@@ -101,10 +119,20 @@
 
     neutron_plugin_configure_l3_agent $Q_L3_CONF_FILE
 
-    _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+    # If we've been given a PUBLIC_INTERFACE to take over, then we assume
+    # that we can own the whole thing and pivot it into the OVS
+    # bridge. If not, we're probably on a single-interface
+    # machine, and we just set up NAT so that fixed guests can get out.
+    if [[ -n "$PUBLIC_INTERFACE" ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
 
-    if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+        fi
+    else
+        for d in $default_v4_route_devs; do
+            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+        done
     fi
 }
 
@@ -138,21 +166,38 @@
         neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK
     fi
 
+    if is_networking_extension_supported "auto-allocated-topology"; then
+        if [[ "$USE_SUBNETPOOL" == "True" ]]; then
+            if [[ "$IP_VERSION" =~ 4.* ]]; then
+                SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2)
+            fi
+            if [[ "$IP_VERSION" =~ .*6 ]]; then
+                SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2)
+            fi
+        fi
+    fi
+
     if is_provider_network; then
         die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
         die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
-        NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create $PHYSICAL_NETWORK --tenant_id $project_id --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
-            SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+            if [ -z $SUBNETPOOL_V4_ID ]; then
+                fixed_range_v4=$FIXED_RANGE
+            fi
+            SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
         fi
 
         if [[ "$IP_VERSION" =~ .*6 ]]; then
             die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6"
             die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6"
-            SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2)
+            if [ -z $SUBNETPOOL_V6_ID ]; then
+                fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
+            fi
+            SUBNET_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID $fixed_range_v6 | grep 'id' | get_field 2)
             die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
         fi
 
@@ -162,7 +207,7 @@
             sudo ip link set $PUBLIC_INTERFACE up
         fi
     else
-        NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create --tenant-id $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -180,29 +225,23 @@
         # Create a router, and add the private subnet as one of its interfaces
         if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
             # create a tenant-owned router.
-            ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create --tenant-id $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
         else
             # Plugin only supports creating a single router, which should be admin owned.
-            ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
         fi
 
-        EXTERNAL_NETWORK_FLAGS="--router:external"
-        if is_networking_extension_supported "auto-allocated-topology" && is_networking_extension_supported "subnet_allocation"; then
-            EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default"
-            if [[ "$IP_VERSION" =~ 4.* ]]; then
-                SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2)
-            fi
-            if [[ "$IP_VERSION" =~ .*6 ]]; then
-                SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2)
-            fi
+        EXTERNAL_NETWORK_FLAGS="--external"
+        if is_networking_extension_supported "auto-allocated-topology"; then
+            EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default"
         fi
         # Create an external network, and a subnet. Configure the external network as router gw
         if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
-            EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
         else
-            EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
         fi
         die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
 
@@ -221,13 +260,19 @@
 # Create private IPv4 subnet
 function _neutron_create_private_subnet_v4 {
     local project_id=$1
-    local subnet_params="--tenant-id $project_id "
-    subnet_params+="--ip_version 4 "
-    subnet_params+="--gateway $NETWORK_GATEWAY "
-    subnet_params+="--name $PRIVATE_SUBNET_NAME "
-    subnet_params+="$NET_ID $FIXED_RANGE"
+    if [ -z $SUBNETPOOL_V4_ID ]; then
+        fixed_range_v4=$FIXED_RANGE
+    fi
+    local subnet_params="--project $project_id "
+    subnet_params+="--ip-version 4 "
+    if [[ -n "$NETWORK_GATEWAY" ]]; then
+        subnet_params+="--gateway $NETWORK_GATEWAY "
+    fi
+    subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
+    subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
+    subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
     local subnet_id
-    subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2)
+    subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
     echo $subnet_id
 }
@@ -238,47 +283,53 @@
     die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set"
     die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set"
     local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE"
-    local subnet_params="--tenant-id $project_id "
-    subnet_params+="--ip_version 6 "
-    subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
-    subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
-    subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
+    if [ -z $SUBNETPOOL_V6_ID ]; then
+        fixed_range_v6=$FIXED_RANGE_V6
+    fi
+    local subnet_params="--project $project_id "
+    subnet_params+="--ip-version 6 "
+    if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
+        subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
+    fi
+    subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} "
+    subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6 $ipv6_modes} "
+    subnet_params+="--network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
     local ipv6_subnet_id
-    ipv6_subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2)
+    ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
     echo $ipv6_subnet_id
 }
 
 # Create public IPv4 subnet
 function _neutron_create_public_subnet_v4 {
-    local subnet_params+="--ip_version 4 "
+    local subnet_params="--ip-version 4 "
     subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} "
-    subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
-    subnet_params+="--name $PUBLIC_SUBNET_NAME "
-    subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
-    subnet_params+="-- --enable_dhcp=False"
+    if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then
+        subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
+    fi
+    subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp "
+    subnet_params+="$PUBLIC_SUBNET_NAME"
     local id_and_ext_gw_ip
-    id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
     echo $id_and_ext_gw_ip
 }
 
 # Create public IPv6 subnet
 function _neutron_create_public_subnet_v6 {
-    local subnet_params="--ip_version 6 "
+    local subnet_params="--ip-version 6 "
     subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY "
-    subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
-    subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
-    subnet_params+="-- --enable_dhcp=False"
+    subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp "
+    subnet_params+="$IPV6_PUBLIC_SUBNET_NAME"
     local ipv6_id_and_ext_gw_ip
-    ipv6_id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
     echo $ipv6_id_and_ext_gw_ip
 }
 
 # Configure neutron router for IPv4 public access
 function _neutron_configure_router_v4 {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $SUBNET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
     # Create a public subnet on the external network
     local id_and_ext_gw_ip
     id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
@@ -295,22 +346,26 @@
         if is_neutron_ovs_base_plugin; then
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
         elif [[ "$Q_AGENT" = "linuxbridge" ]]; then
-            # Search for the brq device the neutron router and network for $FIXED_RANGE
+            # Get the device that the neutron router and network for $FIXED_RANGE
             # will be using.
-            # e.x. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102
-            ext_gw_interface=brq${EXT_NET_ID:0:11}
+            if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
+                # in provider nets a bridge mapping uses the public bridge directly
+                ext_gw_interface=$PUBLIC_BRIDGE
+            else
+                # e.x. brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102
+                ext_gw_interface=brq${EXT_NET_ID:0:11}
+            fi
         fi
         if [[ "$ext_gw_interface" != "none" ]]; then
             local cidr_len=${FLOATING_RANGE#*/}
             local testcmd="ip -o link | grep -q $ext_gw_interface"
             test_with_retry "$testcmd" "$ext_gw_interface creation failed"
-            if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then
+            if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then
                 sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
                 sudo ip link set $ext_gw_interface up
             fi
-            ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+            ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
             die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
-            sudo ip route replace  $FIXED_RANGE via $ROUTER_GW_IP
         fi
         _neutron_set_router_id
     fi
@@ -318,7 +373,7 @@
 
 # Configure neutron router for IPv6 public access
 function _neutron_configure_router_v6 {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
     # Create a public subnet on the external network
     local ipv6_id_and_ext_gw_ip
     ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
@@ -335,11 +390,21 @@
 
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+        # Ensure IPv6 RAs are accepted on interfaces with a default route.
+        # This is needed for neutron-based devstack clouds to work in
+        # IPv6-only clouds in the gate. Please do not remove this without
+        # talking to folks in Infra.
+        for d in $default_v6_route_devs; do
+            # Slashes must be used as separators in this sysctl key because
+            # route devices can have dots in their names; with dot separators,
+            # a dot in the device name would be misread as a separator and the
+            # command would fail.
+            sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
+        done
         # Ensure IPv6 forwarding is enabled on the host
         sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
-        IPV6_ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+        IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
         die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
         if is_neutron_ovs_base_plugin; then
@@ -349,22 +414,19 @@
 
             # Configure interface for public bridge
             sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
-            sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
+            local replace_range=${SUBNETPOOL_PREFIX_V6}
+            if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
+                replace_range=${FIXED_RANGE_V6}
+            fi
+            sudo ip -6 route replace $replace_range via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
         fi
         _neutron_set_router_id
     fi
 }
 
-function is_provider_network {
-    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
-        return 0
-    fi
-    return 1
-}
-
 function is_networking_extension_supported {
     local extension=$1
     # TODO(sc68cal) cache this instead of calling every time
-    EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value)
+    EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
     [[ $EXT_LIST =~ $extension ]] && return 0
 }
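
The FIXED_RANGE_V6 default above trims an IPV6_ADDRS_SAFE_TO_USE range wider than /64 down to its first /64, per the RFC 7421 comment. A quick worked example with an illustrative ULA prefix:

    echo "fd12:3456:789a::/56" | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }'
    # -> fd12:3456:789a::/64  (a /56 is narrowed to its first /64)
    echo "fd12:3456:789a::/64" | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }'
    # -> fd12:3456:789a::/64  (already /64 or longer, left unchanged)
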
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
deleted file mode 100644
index 30e9480..0000000
--- a/lib/neutron_plugins/services/loadbalancer
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-# Neutron loadbalancer plugin
-# ---------------------------
-
-# Save trace setting
-_XTRACE_NEUTRON_LB=$(set +o | grep xtrace)
-set +o xtrace
-
-
-AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
-LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin
-
-function neutron_agent_lbaas_install_agent_packages {
-    if is_ubuntu || is_fedora || is_suse; then
-        install_package haproxy
-    fi
-}
-
-function neutron_agent_lbaas_configure_common {
-    _neutron_service_plugin_class_add $LBAAS_PLUGIN
-    _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
-}
-
-function neutron_agent_lbaas_configure_agent {
-    LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
-    mkdir -p $LBAAS_AGENT_CONF_PATH
-
-    LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
-
-    cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME
-
-    # ovs_use_veth needs to be set before the plugin configuration
-    # occurs to allow plugins to override the setting.
-    iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
-    neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
-
-    if is_fedora; then
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
-        iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
-    fi
-}
-
-function neutron_lbaas_stop {
-    pids=$(ps aux | awk '/haproxy/ { print $2 }')
-    [ ! -z "$pids" ] && sudo kill $pids || true
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON_LB
diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md
deleted file mode 100644
index 905ae77..0000000
--- a/lib/neutron_thirdparty/README.md
+++ /dev/null
@@ -1,41 +0,0 @@
-Neutron third party specific files
-==================================
-Some Neutron plugins require third party programs to function.
-The files under the directory, ``lib/neutron_thirdparty/``, will be used
-when their service are enabled.
-Third party program specific configuration variables should be in this file.
-
-* filename: ``<third_party>``
-  * The corresponding file name should be same to service name, ``<third_party>``.
-
-functions
----------
-``lib/neutron-legacy`` calls the following functions when the ``<third_party>`` is enabled
-
-functions to be implemented
-* ``configure_<third_party>``:
-  set config files, create data dirs, etc
-  e.g.
-  sudo python setup.py deploy
-  iniset $XXXX_CONF...
-
-* ``init_<third_party>``:
-  initialize databases, etc
-
-* ``install_<third_party>``:
-  collect source and prepare
-  e.g.
-  git clone xxx
-
-* ``start_<third_party>``:
-  start running processes, including screen if USE_SCREEN=True
-  e.g.
-  run_process XXXX "$XXXX_DIR/bin/XXXX-bin"
-
-* ``stop_<third_party>``:
-  stop running processes (non-screen)
-  e.g.
-  stop_process XXXX
-
-* ``check_<third_party>``:
-  verify that the integration between neutron server and third-party components is sane
diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight
deleted file mode 100644
index 45a4f2e..0000000
--- a/lib/neutron_thirdparty/bigswitch_floodlight
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/bash
-#
-# Big Switch/FloodLight  OpenFlow Controller
-# ------------------------------------------
-
-# Save trace setting
-_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace)
-set +o xtrace
-
-BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80}
-BS_FL_OF_PORT=${BS_FL_OF_PORT:-6633}
-
-function configure_bigswitch_floodlight {
-    :
-}
-
-function init_bigswitch_floodlight {
-    install_neutron_agent_packages
-
-    echo -n "Installing OVS managed by the openflow controllers:"
-    echo ${BS_FL_CONTROLLERS_PORT}
-
-    # Create local OVS bridge and configure it
-    sudo ovs-vsctl --no-wait -- --if-exists del-br ${OVS_BRIDGE}
-    sudo ovs-vsctl --no-wait add-br ${OVS_BRIDGE}
-    sudo ovs-vsctl --no-wait br-set-external-id ${OVS_BRIDGE} bridge-id ${OVS_BRIDGE}
-
-    ctrls=
-    for ctrl in `echo ${BS_FL_CONTROLLERS_PORT} | tr ',' ' '`; do
-        ctrl=${ctrl%:*}
-        ctrls="${ctrls} tcp:${ctrl}:${BS_FL_OF_PORT}"
-    done
-    echo "Adding Network conttrollers: " ${ctrls}
-    sudo ovs-vsctl --no-wait set-controller ${OVS_BRIDGE} ${ctrls}
-}
-
-function install_bigswitch_floodlight {
-    :
-}
-
-function start_bigswitch_floodlight {
-    :
-}
-
-function stop_bigswitch_floodlight {
-    :
-}
-
-function check_bigswitch_floodlight {
-    :
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON_BIGSWITCH
diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx
deleted file mode 100644
index e182fca..0000000
--- a/lib/neutron_thirdparty/vmware_nsx
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-# REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx'
-# continues to work.
diff --git a/lib/nova b/lib/nova
index 67a80b9..ca9a6c7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -83,7 +83,10 @@
 
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
-FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"}
+FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
+
+# Option to initialize CellsV2 environment
+NOVA_CONFIGURE_CELLSV2=$(trueorfalse False NOVA_CONFIGURE_CELLSV2)
 
 # Nova supports pluggable schedulers.  The default ``FilterScheduler``
 # should work in most cases.
@@ -128,7 +131,7 @@
 # --------------------------
 
 NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
-PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
+
 VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
 FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
 
@@ -299,8 +302,6 @@
     # Put config files in ``/etc/nova`` for everyone to find
     sudo install -d -o $STACK_USER $NOVA_CONF_DIR
 
-    install_default_policy nova
-
     configure_rootwrap nova
 
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
@@ -458,7 +459,6 @@
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
     iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
     iniset $NOVA_CONF DEFAULT scheduler_default_filters "$FILTERS"
-    iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
     iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
     iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
@@ -481,11 +481,6 @@
         iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
     fi
 
-    iniset $NOVA_CONF privsep_osbrick helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF"
-
-    iniset $NOVA_CONF vif_plug_ovs_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF"
-    iniset $NOVA_CONF vif_plug_linux_bridge_privileged helper_command "sudo nova-rootwrap \$rootwrap_config privsep-helper --config-file $NOVA_CONF"
-
     if is_service_enabled n-api; then
         if is_service_enabled n-api-meta; then
             # If running n-api-meta as a separate service
@@ -541,7 +536,6 @@
         iniset $NOVA_CONF DEFAULT instance_usage_audit "True"
         iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour"
         iniset $NOVA_CONF DEFAULT notify_on_state_change "vm_and_task_state"
-        iniset $NOVA_CONF oslo_messaging_notifications driver "messaging"
     fi
 
     # All nova-compute workers need to know the vnc configuration options
@@ -560,7 +554,6 @@
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        iniset $NOVA_CONF vnc enabled true
         iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN"
         iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
         iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
@@ -578,10 +571,11 @@
         iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
         iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
         iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-    else
-        iniset $NOVA_CONF spice enabled false
     fi
 
+    # Set the oslo messaging driver to the typical default. This does not
+    # enable notifications, but it will allow them to function when enabled.
+    iniset $NOVA_CONF oslo_messaging_notifications driver "messaging"
     iniset_rpc_backend nova $NOVA_CONF
     iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}"
 
@@ -664,8 +658,9 @@
 }
 
 function create_nova_conf_nova_network {
+    local public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
     iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
-    iniset $NOVA_CONF DEFAULT public_interface "$PUBLIC_INTERFACE"
+    iniset $NOVA_CONF DEFAULT public_interface "$public_interface"
     iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
     iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
     if [ -n "$FLAT_INTERFACE" ]; then
@@ -684,10 +679,15 @@
     # All nova components talk to a central database.
     # Only do this step once on the API node for an entire cluster.
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
-        # (Re)create nova database
+        # (Re)create nova databases
         recreate_database nova
+        if [ "$NOVA_CONFIGURE_CELLSV2" != "False" ]; then
+            recreate_database nova_api_cell0
+        fi
 
-        # Migrate nova database
+        # Migrate the nova database. If "nova-manage cell_v2 simple_cell_setup"
+        # has been run, this migrates the "nova" and "nova_api_cell0" databases.
+        # Otherwise it migrates just the "nova" database.
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
 
         if is_service_enabled n-cell; then
@@ -802,7 +802,7 @@
 
     # Start proxies if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
+        start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
     fi
 
     export PATH=$old_path
@@ -825,6 +825,10 @@
         # ``sg`` is used in run_process to execute nova-compute as a member of the
         # **$LIBVIRT_GROUP** group.
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
+    elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP
+    elif [[ "$VIRT_DRIVER" = 'docker' ]]; then
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
         local i
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
@@ -862,9 +866,13 @@
     run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
     run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
     run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
-
     run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
+
+    if is_service_enabled n-net; then
+        enable_kernel_bridge_firewall
+    fi
     run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
     run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
 
@@ -940,6 +948,15 @@
     fi
 }
 
+# create_cell(): Group the available hosts into a cell
+function create_cell {
+    if ! is_service_enabled n-cell; then
+        nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url)
+    else
+        echo 'Skipping cellsv2 setup for this cellsv1 configuration'
+    fi
+}
+
 # Restore xtrace
 $_XTRACE_LIB_NOVA
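
create_cell is wired up from stack.sh and gated on NOVA_CONFIGURE_CELLSV2, so opting in is a single local.conf setting. A minimal sketch:

    [[local|localrc]]
    # Opt in to the cells v2 bootstrap; stack.sh will run
    # "nova-manage cell_v2 simple_cell_setup" once compute hosts are up.
    NOVA_CONFIGURE_CELLSV2=True
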
 
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 4e5a748..5e7695a 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -23,12 +23,7 @@
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
     if is_ubuntu; then
-        if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then
-            install_package qemu-system
-        else
-            install_package qemu-kvm
-            install_package libguestfs0
-        fi
+        install_package qemu-system
         install_package libvirt-bin libvirt-dev
         pip_install_gr libvirt-python
         if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then
@@ -65,6 +60,7 @@
     "/dev/random", "/dev/urandom",
     "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
     "/dev/rtc", "/dev/hpet","/dev/net/tun",
+    "/dev/vfio/vfio",
 ]
 EOF
     fi
@@ -124,6 +120,12 @@
     # Service needs to be started on redhat/fedora -- do a restart for
     # sanity after fiddling the config.
     restart_service $LIBVIRT_DAEMON
+
+    # Restart virtlogd companion service to ensure it is running properly
+    #  https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455
+    #  https://bugzilla.redhat.com/show_bug.cgi?id=1290357
+    # (not all platforms have it; libvirt 1.3+ only, thus the ignore)
+    restart_service virtlogd || true
 }
 
 
diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index 2434dce..f9b95c1 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -36,7 +36,7 @@
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor {
-    iniset $NOVA_CONF DEFAULT compute_driver "nova.virt.fake.FakeDriver"
+    iniset $NOVA_CONF DEFAULT compute_driver "fake.FakeDriver"
     # Disable arbitrary limits
     iniset $NOVA_CONF DEFAULT quota_instances -1
     iniset $NOVA_CONF DEFAULT quota_cores -1
@@ -45,7 +45,7 @@
     iniset $NOVA_CONF DEFAULT quota_fixed_ips -1
     iniset $NOVA_CONF DEFAULT quota_metadata_items -1
     iniset $NOVA_CONF DEFAULT quota_injected_files -1
-    iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1
+    iniset $NOVA_CONF DEFAULT quota_injected_file_path_length -1
     iniset $NOVA_CONF DEFAULT quota_security_groups -1
     iniset $NOVA_CONF DEFAULT quota_security_group_rules -1
     iniset $NOVA_CONF DEFAULT quota_key_pairs -1
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index c40427c..7ffd14d 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -45,11 +45,13 @@
     iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
     iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     # ironic section
-    iniset $NOVA_CONF ironic admin_username admin
-    iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD
-    iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_URI/v2.0
-    iniset $NOVA_CONF ironic admin_tenant_name demo
-    iniset $NOVA_CONF ironic api_endpoint $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1
+    iniset $NOVA_CONF ironic auth_type password
+    iniset $NOVA_CONF ironic username admin
+    iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
+    iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI/v3
+    iniset $NOVA_CONF ironic project_domain_id default
+    iniset $NOVA_CONF ironic user_domain_id default
+    iniset $NOVA_CONF ironic project_name demo
 }
 
 # install_nova_hypervisor() - Install external components
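
For reference, the iniset calls above produce an [ironic] section along these lines once the variables expand (an illustrative sketch, not a verbatim nova.conf dump):

    [ironic]
    auth_type = password
    username = admin
    password = <ADMIN_PASSWORD>
    auth_url = <KEYSTONE_AUTH_URI>/v3
    project_domain_id = default
    user_domain_id = default
    project_name = demo
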
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index d0e364e..167ab6f 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -40,7 +40,8 @@
     configure_libvirt
     iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
     iniset $NOVA_CONF libvirt cpu_mode "none"
-    iniset $NOVA_CONF libvirt use_usb_tablet "False"
+    # Do not enable USB tablet input devices to avoid QEMU CPU overhead.
+    iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
     iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
     iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
     iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
@@ -55,11 +56,16 @@
     if is_arch "aarch64"; then
         # arm64 architecture currently does not support graphical consoles.
         iniset $NOVA_CONF vnc enabled "false"
+        iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
     fi
 
-    # File injection is being disabled by default in the near future -
-    # disable it here for now to avoid surprises later.
-    iniset $NOVA_CONF libvirt inject_partition '-2'
+    if isset ENABLE_FILE_INJECTION; then
+        if [ "$ENABLE_FILE_INJECTION" == "True" ]; then
+            # -1 means use libguestfs to inspect the guest OS image for the
+            # root partition to use for file injection.
+            iniset $NOVA_CONF libvirt inject_partition '-1'
+        fi
+    fi
 
     if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then
         iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system"
@@ -95,6 +101,14 @@
             yum_install libcgroup-tools
         fi
     fi
+
+    if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then
+        if is_ubuntu; then
+            install_package python-guestfs
+        elif is_fedora || is_suse; then
+            install_package python-libguestfs
+        fi
+    fi
 }
 
 # start_nova_hypervisor - Start any required external services
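
File injection is now opt-in rather than on by default. A minimal local.conf sketch to turn it back on for the libvirt driver:

    [[local|localrc]]
    # Re-enable guest file injection: sets libvirt inject_partition=-1
    # and installs python-guestfs (Ubuntu) or python-libguestfs (Fedora/SUSE).
    ENABLE_FILE_INJECTION=True
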
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index e7f1e87..e5d25da 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -24,8 +24,6 @@
 # Defaults
 # --------
 
-PUBLIC_INTERFACE_DEFAULT=eth2
-GUEST_INTERFACE_DEFAULT=eth1
 # Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
 FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
 if is_service_enabled neutron; then
@@ -89,6 +87,7 @@
         cat $TOP_DIR/tools/xen/functions
         echo "create_directory_for_images"
         echo "create_directory_for_kernels"
+        echo "install_conntrack_tools"
     } | $ssh_dom0
 
 }
diff --git a/lib/os_brick b/lib/os_brick
new file mode 100644
index 0000000..d1cca4a
--- /dev/null
+++ b/lib/os_brick
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# lib/os_brick
+# Install **os-brick** python module from source
+
+# Dependencies:
+#
+# - functions
+# - DEST, DATA_DIR must be defined
+
+# stack.sh
+# ---------
+# - install_os_brick
+
+# Save trace setting
+_XTRACE_OS_BRICK=$(set +o | grep xtrace)
+set +o xtrace
+
+
+GITDIR["os-brick"]=$DEST/os-brick
+
+# Install os_brick from git only if requested; otherwise it will be pulled from
+# pip repositories by the requirements of projects that need it.
+function install_os_brick {
+    if use_library_from_git "os-brick"; then
+        git_clone_by_name "os-brick"
+        setup_dev_lib "os-brick"
+    fi
+}
+
+# Restore xtrace
+$_XTRACE_OS_BRICK
\ No newline at end of file
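
By default os-brick is pulled from pip via project requirements; install_os_brick only builds it from source when the library is requested from git. A minimal sketch, assuming the standard LIBS_FROM_GIT mechanism behind use_library_from_git:

    [[local|localrc]]
    # Develop against os-brick from its git checkout instead of PyPI
    LIBS_FROM_GIT=os-brick
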
diff --git a/lib/oslo b/lib/oslo
index 1773da2..e34e48a 100644
--- a/lib/oslo
+++ b/lib/oslo
@@ -26,6 +26,8 @@
 GITDIR["cliff"]=$DEST/cliff
 GITDIR["debtcollector"]=$DEST/debtcollector
 GITDIR["futurist"]=$DEST/futurist
+GITDIR["os-client-config"]=$DEST/os-client-config
+GITDIR["osc-lib"]=$DEST/osc-lib
 GITDIR["oslo.cache"]=$DEST/oslo.cache
 GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency
 GITDIR["oslo.config"]=$DEST/oslo.config
@@ -71,6 +73,8 @@
     _do_install_oslo_lib "cliff"
     _do_install_oslo_lib "debtcollector"
     _do_install_oslo_lib "futurist"
+    _do_install_oslo_lib "osc-lib"
+    _do_install_oslo_lib "os-client-config"
     _do_install_oslo_lib "oslo.cache"
     _do_install_oslo_lib "oslo.concurrency"
     _do_install_oslo_lib "oslo.config"
diff --git a/lib/placement b/lib/placement
new file mode 100644
index 0000000..165c670
--- /dev/null
+++ b/lib/placement
@@ -0,0 +1,193 @@
+#!/bin/bash
+#
+# lib/placement
+# Functions to control the configuration and operation of the **Placement** service
+#
+# Currently the placement service is embedded in nova. Eventually we
+# expect this to change, so this file is started as a separate entity
+# despite making use of some *NOVA* variables and files.
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``FILES``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_placement
+# - cleanup_placement
+# - configure_placement
+# - init_placement
+# - start_placement
+# - stop_placement
+
+# Save trace setting
+_XTRACE_LIB_PLACEMENT=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+PLACEMENT_CONF_DIR=/etc/nova
+PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf
+PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement}
+
+
+# The placement service can optionally use a separate database
+# connection. Set PLACEMENT_DB_ENABLED to True to use it.
+# NOTE(cdent): This functionality depends on some code that is not
+# yet merged in nova but is coming soon.
+PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED)
+
+if is_ssl_enabled_service "placement-api" || is_service_enabled tls-proxy; then
+    PLACEMENT_SERVICE_PROTOCOL="https"
+fi
+
+# Public facing bits
+PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST}
+PLACEMENT_SERVICE_PORT=${PLACEMENT_SERVICE_PORT:-8778}
+
+# Functions
+# ---------
+
+# Test if any placement services are enabled
+# is_placement_enabled
+function is_placement_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"placement-" ]] && return 0
+    return 1
+}
+
+# cleanup_placement() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_placement {
+    sudo rm -f $(apache_site_config_for placement-api)
+}
+
+# _config_placement_apache_wsgi() - Set WSGI config files
+function _config_placement_apache_wsgi {
+    local placement_api_apache_conf
+    local placement_api_port=$PLACEMENT_SERVICE_PORT
+    local venv_path=""
+    local nova_bin_dir=""
+    nova_bin_dir=$(get_python_exec_prefix)
+    placement_api_apache_conf=$(apache_site_config_for placement-api)
+
+    # reuse nova's cert if a cert is being used
+    if is_ssl_enabled_service "placement-api"; then
+        placement_ssl="SSLEngine On"
+        placement_certfile="SSLCertificateFile $NOVA_SSL_CERT"
+        placement_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY"
+    fi
+    # reuse nova's venv if there is one, as the placement code lives
+    # there
+    if [[ ${USE_VENV} = True ]]; then
+        venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
+        nova_bin_dir=${PROJECT_VENV["nova"]}/bin
+    fi
+
+    sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$placement_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g;
+        s|%SSLENGINE%|$placement_ssl|g;
+        s|%SSLCERTFILE%|$placement_certfile|g;
+        s|%SSLKEYFILE%|$placement_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+        s|%APIWORKERS%|$API_WORKERS|g
+    " -i $placement_api_apache_conf
+}
+
+# configure_placement() - Set config files, create data dirs, etc
+function configure_placement {
+    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
+        iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
+    fi
+
+    iniset $NOVA_CONF placement auth_type "password"
+    iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
+    iniset $NOVA_CONF placement username placement
+    iniset $NOVA_CONF placement password "$SERVICE_PASSWORD"
+    iniset $NOVA_CONF placement user_domain_name "Default"
+    iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME"
+    iniset $NOVA_CONF placement project_domain_name "Default"
+    iniset $NOVA_CONF placement os_region_name "$REGION_NAME"
+    # TODO(cdent): auth_strategy, which is common to see in these
+    # blocks, is not currently used here. For the time being the
+    # placement api uses the auth_strategy configuration setting
+    # established by the nova api. This avoids, for the time being,
+    # creating redundant configuration items that are just used for
+    # testing.
+
+    _config_placement_apache_wsgi
+}
+
+# create_placement_accounts() - Set up required placement accounts
+# and service and endpoints.
+function create_placement_accounts {
+    create_service_user "placement" "admin"
+    local placement_api_url="$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement"
+    get_or_create_service "placement" "placement" "Placement Service"
+    get_or_create_endpoint \
+        "placement" \
+        "$REGION_NAME" \
+        "$placement_api_url" \
+        "$placement_api_url" \
+        "$placement_api_url"
+}
+
+# init_placement() - Create service user and endpoints
+# If PLACEMENT_DB_ENABLED is true, create the separate placement db
+# using, for now, the api_db migrations.
+function init_placement {
+    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
+        recreate_database placement
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
+    fi
+    create_placement_accounts
+}
+
+# install_placement() - Collect source and prepare
+function install_placement {
+    install_apache_wsgi
+    if is_ssl_enabled_service "placement-api"; then
+        enable_mod_ssl
+    fi
+}
+
+# start_placement_api() - Start the API processes ahead of other things
+function start_placement_api {
+    # Get right service port for testing
+    local service_port=$PLACEMENT_SERVICE_PORT
+    local placement_api_port=$PLACEMENT_SERVICE_PORT
+
+    enable_apache_site placement-api
+    restart_apache_server
+    tail_log placement-api /var/log/$APACHE_NAME/placement-api.log
+
+    echo "Waiting for placement-api to start..."
+    if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then
+        die $LINENO "placement-api did not start"
+    fi
+}
+
+function start_placement {
+    start_placement_api
+}
+
+# stop_placement() - Disable the api service and stop it.
+function stop_placement {
+    disable_apache_site placement-api
+    restart_apache_server
+}
+
+# Restore xtrace
+$_XTRACE_LIB_PLACEMENT
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
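
Since is_placement_enabled matches any service name starting with "placement-", enabling the new API is a small local.conf addition. A minimal sketch (the separate placement database stays off until the supporting nova code noted above has merged):

    [[local|localrc]]
    # Run the placement API under Apache alongside nova
    enable_service placement-api
    # Optional, once the pending nova changes have landed:
    #PLACEMENT_DB_ENABLED=True
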
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 0ee46dc..97b1aa4 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -24,6 +24,8 @@
 _XTRACE_RPC_BACKEND=$(set +o | grep xtrace)
 set +o xtrace
 
+RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
+
 # Functions
 # ---------
 
diff --git a/lib/swift b/lib/swift
index 0c74411..f9ea028 100644
--- a/lib/swift
+++ b/lib/swift
@@ -806,7 +806,7 @@
     done
     if is_service_enabled tls-proxy; then
         local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
-        start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT &
+        start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT
     fi
     run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
     if [[ ${SWIFT_REPLICAS} == 1 ]]; then
diff --git a/lib/tempest b/lib/tempest
index aa09e9a..a5dd531 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,7 +15,6 @@
 #   - ``SERVICE_HOST``
 #   - ``BASE_SQL_CONN`` ``lib/database`` declares
 #   - ``PUBLIC_NETWORK_NAME``
-#   - ``Q_ROUTER_NAME``
 #   - ``VIRT_DRIVER``
 #   - ``LIBVIRT_TYPE``
 #   - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
@@ -194,11 +193,11 @@
         available_flavors=$(nova flavor-list)
         if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
-                nova flavor-create m1.nano 42 64 0 1
+                openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano
             fi
             flavor_ref=42
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
-                nova flavor-create m1.micro 84 128 0 1
+                openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro
             fi
             flavor_ref_alt=84
         else
@@ -236,13 +235,14 @@
         fi
     fi
 
+    iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE
+
     ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
 
     # the public network (for floating ip access) is only available
     # if the extension is enabled.
     if is_networking_extension_supported 'external-net'; then
-        public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
-            awk '{print $2}')
+        public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
     fi
 
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -266,8 +266,7 @@
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
         iniset $TEMPEST_CONFIG auth admin_username $admin_username
         iniset $TEMPEST_CONFIG auth admin_password "$password"
-        iniset $TEMPEST_CONFIG auth admin_tenant_name $admin_project_name
-        iniset $TEMPEST_CONFIG auth admin_tenant_id $admin_project_id
+        iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name
         iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name
     fi
     if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
@@ -295,7 +294,6 @@
     fi
     if [ "$VIRT_DRIVER" = "xenserver" ]; then
         iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
-        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
     fi
 
     # Image Features
@@ -305,17 +303,12 @@
     fi
 
     # Compute
-    iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute image_ref $image_uuid
     iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt
-    iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${ALT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
-    iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
-    # set the equiv validation option here as well to ensure they are
-    # in sync. They shouldn't be separate options.
     iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
-    if [[ ! $(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then
+    if ! is_service_enabled n-cell && ! is_service_enabled neutron; then
         iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
     fi
 
@@ -352,19 +345,14 @@
         iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion
     fi
 
+    # TODO(mriedem): Remove allow_port_security_disabled after liberty-eol.
+    iniset $TEMPEST_CONFIG compute-feature-enabled allow_port_security_disabled True
+    iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled resize True
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
     iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
-    # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life.
-    iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True
-    # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life.
-    iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True
     iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
-    # TODO(mriedem): Remove this when kilo-eol happens since the
-    # neutron.allow_duplicate_networks option was removed from nova in Liberty
-    # and is now the default behavior.
-    iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True}
     if is_service_enabled n-cell; then
         # Cells doesn't support shelving/unshelving
         iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
@@ -391,6 +379,7 @@
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
 
     # Orchestration Tests
     if is_service_enabled heat; then
@@ -407,7 +396,7 @@
             # build a specialized heat flavor
             available_flavors=$(nova flavor-list)
             if [[ ! ( $available_flavors =~ 'm1.heat' ) ]]; then
-                nova flavor-create m1.heat 451 512 0 1
+                openstack flavor create --id 451 --ram 512 --disk 0 --vcpus 1 m1.heat
             fi
             iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat"
         fi
@@ -416,29 +405,36 @@
     fi
 
     # Scenario
-    SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+    if [ "$VIRT_DRIVER" = "xenserver" ]; then
+        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
+        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz"
+        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
+        iniset $TEMPEST_CONFIG scenario img_container_format ovf
+    else
+        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
+    fi
     iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR
+    iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE
     iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img"
     iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd"
     iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz"
-    iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
 
-    # Large Ops Number
-    iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0}
-
-    # Telemetry
-    iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True"
-
+    # If using provider networking, use the physical network for validation rather than private
+    TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
+    if is_provider_network; then
+        TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK
+    fi
     # Validation
     iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False}
     iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
     iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
-    iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME
+    iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
 
     # Volume
-    # TODO(obutenko): Remove the incremental_backup_force flag when Kilo and Juno is end of life.
-    iniset $TEMPEST_CONFIG volume-feature-enabled incremental_backup_force True
+    # TODO(obutenko): Remove snapshot_backup when liberty-eol happens.
+    iniset $TEMPEST_CONFIG volume-feature-enabled snapshot_backup True
     # TODO(ynesenenko): Remove the volume_services flag when Liberty and Kilo will correct work with host info.
     iniset $TEMPEST_CONFIG volume-feature-enabled volume_services True
     # TODO(ameade): Remove the api_v3 flag when Mitaka and Liberty are end of life.
@@ -462,15 +458,26 @@
     fi
 
     # Using ``CINDER_ENABLED_BACKENDS``
+    # Cinder uses a comma separated list with "type:backend_name":
+    #  CINDER_ENABLED_BACKENDS = ceph:cephBE1,lvm:lvmBE2,foo:my_foo
     if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then
+        # We have at least 2 backends
         iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True"
-        local i=1
+        local add_comma_separator=0
+        local backends_list=''
         local be
+        # Tempest uses a comma separated list of backend_names:
+        #   backend_names = BACKEND_1,BACKEND_2
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
-            local be_name=${be##*:}
-            iniset $TEMPEST_CONFIG volume "backend${i}_name" "$be_name"
-            i=$(( i + 1 ))
+            if [ "$add_comma_seperator" -eq "1" ]; then
+                backends_list+=,${be##*:}
+            else
+                # first element in the list
+                backends_list+=${be##*:}
+                add_comma_separator=1
+            fi
         done
+        iniset $TEMPEST_CONFIG volume "backend_names" "$backends_list"
     fi
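
The loop simply strips the "type:" prefix from each backend entry and joins the names with commas. An equivalent standalone sketch of the transformation (using a parameter-expansion shortcut in place of the flag variable):

    backends_list=''
    for be in ceph:cephBE1 lvm:lvmBE2; do
        backends_list+=${backends_list:+,}${be##*:}
    done
    echo $backends_list   # -> cephBE1,lvmBE2
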
 
     if [ $TEMPEST_VOLUME_DRIVER != "default" -o \
@@ -493,6 +500,7 @@
         iniset $TEMPEST_CONFIG baremetal driver_enabled True
         iniset $TEMPEST_CONFIG baremetal unprovision_timeout $BUILD_TIMEOUT
         iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT
+        iniset $TEMPEST_CONFIG baremetal deploywait_timeout $BUILD_TIMEOUT
         iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES
         iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID
         iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
@@ -570,16 +578,14 @@
     # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints
     # NOTE(mtreinish): This must be done after auth settings are added to the tempest config
     tox -evenv -- tempest verify-config -uro $tmp_cfg_file
-    # Nova API extensions
-    local compute_api_extensions=${COMPUTE_API_EXTENSIONS:-"all"}
-    if [[ ! -z "$DISABLE_COMPUTE_API_EXTENSIONS" ]]; then
-        # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
-        compute_api_extensions=${COMPUTE_API_EXTENSIONS:-$(iniget $tmp_cfg_file compute-feature-enabled api_extensions | tr -d " ")}
-        # Remove disabled extensions
-        compute_api_extensions=$(remove_disabled_extensions $compute_api_extensions $DISABLE_COMPUTE_API_EXTENSIONS)
-    fi
-    iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions
+
     # Neutron API Extensions
+
+    # disable metering if we didn't enable the service
+    if ! is_service_enabled q-metering; then
+        DISABLE_NETWORK_API_EXTENSIONS+=", metering"
+    fi
+
     local network_api_extensions=${NETWORK_API_EXTENSIONS:-"all"}
     if [[ ! -z "$DISABLE_NETWORK_API_EXTENSIONS" ]]; then
         # Enabled extensions are either the ones explicitly specified or those available on the API endpoint
diff --git a/lib/tls b/lib/tls
index ca57ed4..14cdf19 100644
--- a/lib/tls
+++ b/lib/tls
@@ -16,7 +16,6 @@
 #
 # - configure_CA
 # - init_CA
-# - cleanup_CA
 
 # - configure_proxy
 # - start_tls_proxy
@@ -202,7 +201,6 @@
 # Create root and intermediate CAs
 # init_CA
 function init_CA {
-    fix_system_ca_bundle_path
     # Ensure CAs are built
     make_root_CA $ROOT_CA_DIR
     make_int_CA $INT_CA_DIR $ROOT_CA_DIR
@@ -221,26 +219,13 @@
     fi
 }
 
-# Clean up the CA files
-# cleanup_CA
-function cleanup_CA {
-    if is_fedora; then
-        sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
-        sudo update-ca-trust
-    elif is_ubuntu; then
-        sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt
-        sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt
-        sudo update-ca-certificates
-    fi
-}
-
 # Create an initial server cert
 # init_cert
 function init_cert {
     if [[ ! -r $DEVSTACK_CERT ]]; then
         if [[ -n "$TLS_IP" ]]; then
             # Lie to let incomplete match routines work
-            TLS_IP="DNS:$TLS_IP"
+            TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
         fi
         make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
 
@@ -263,6 +248,9 @@
         else
             alt_names="$alt_names,DNS:$SERVICE_HOST"
         fi
+        if is_ipv4_address "$SERVICE_HOST" ; then
+            alt_names="$alt_names,IP:$SERVICE_HOST"
+        fi
     fi
 
     # Only generate the certificate if it doesn't exist yet on the disk
@@ -336,15 +324,17 @@
     create_CA_base $ca_dir
     create_CA_config $ca_dir 'Root CA'
 
-    # Create a self-signed certificate valid for 5 years
-    $OPENSSL req -config $ca_dir/ca.conf \
-        -x509 \
-        -nodes \
-        -newkey rsa \
-        -days 21360 \
-        -keyout $ca_dir/private/cacert.key \
-        -out $ca_dir/cacert.pem \
-        -outform PEM
+    if [ ! -r "$ca_dir/cacert.pem" ]; then
+        # Create a self-signed certificate valid for 5 years
+        $OPENSSL req -config $ca_dir/ca.conf \
+            -x509 \
+            -nodes \
+            -newkey rsa \
+            -days 21360 \
+            -keyout $ca_dir/private/cacert.key \
+            -out $ca_dir/cacert.pem \
+            -outform PEM
+    fi
 }
 
 # If a non-system python-requests is installed then it will use the
@@ -455,27 +445,85 @@
 # Starts the TLS proxy for the given IP/ports
 # start_tls_proxy front-host front-port back-host back-port
 function start_tls_proxy {
-    local f_host=$1
-    local f_port=$2
-    local b_host=$3
-    local b_port=$4
+    local b_service="$1-tls-proxy"
+    local f_host=$2
+    local f_port=$3
+    local b_host=$4
+    local b_port=$5
 
-    stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
+    local config_file
+    config_file=$(apache_site_config_for $b_service)
+    local listen_string
+    # Default apache configs on ubuntu and centos listen on 80 and 443.
+    # Newer apache is fine with a duplicate Listen directive, but older
+    # apache is not, so special-case 80 and 443.
+    if [[ "$f_port" == "80" ]] || [[ "$f_port" == "443" ]]; then
+        listen_string=""
+    elif [[ "$f_host" == '*' ]] ; then
+        listen_string="Listen $f_port"
+    else
+        listen_string="Listen $f_host:$f_port"
+    fi
+    sudo bash -c "cat >$config_file" << EOF
+$listen_string
+
+<VirtualHost $f_host:$f_port>
+    SSLEngine On
+    SSLCertificateFile $DEVSTACK_CERT
+
+    <Location />
+        ProxyPass http://$b_host:$b_port/ retry=5 nocanon
+        ProxyPassReverse http://$b_host:$b_port/
+    </Location>
+    ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
+    ErrorLogFormat "[%{u}t] [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
+    LogLevel info
+    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common
+    LogFormat "%v %h %l %u %t \"%r\" %>s %b"
+</VirtualHost>
+EOF
+    for mod in ssl proxy proxy_http; do
+        enable_apache_mod $mod
+    done
+    enable_apache_site $b_service
+    # Only a reload is required to pull in new vhosts.
+    # Note that a restart reliably fails on centos7 and trusty because
+    # apache cannot open port 80 while the old apache still has it open.
+    # Using reload fixes trusty, but centos7 still does not work.
+    reload_apache_server
 }
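
Note the new calling convention: the first argument is now a service name, which becomes the Apache site for the proxy vhost, and the call no longer needs to be backgrounded since Apache owns the listener. The nova call site elsewhere in this change follows the pattern below:

    # start_tls_proxy <service> <front-host> <front-port> <back-host> <back-port>
    start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
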
 
+# Follow TLS proxy
+function follow_tls_proxy {
+    sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log
+    tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log
+    sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log
+    tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log
+}
 
 # Cleanup Functions
 # =================
 
-# Stops all stud processes. This should be done only after all services
+# Stops the apache service. This should be done only after all services
 # using tls configuration are down.
 function stop_tls_proxy {
-    killall stud
+    stop_apache_server
 }
 
-# Remove CA along with configuration, as well as the local server certificate
+# Clean up the CA files
+# cleanup_CA
 function cleanup_CA {
-    rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT"
+    if is_fedora; then
+        sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
+        sudo update-ca-trust
+    elif is_ubuntu; then
+        sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt
+        sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt
+        sudo update-ca-certificates
+    fi
+
+    rm -rf "$INT_CA_DIR" "$ROOT_CA_DIR" "$DEVSTACK_CERT"
 }
 
 # Tell emacs to use shell-script-mode
diff --git a/samples/local.conf b/samples/local.conf
index 06ac185..6d5351f 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -10,7 +10,7 @@
 
 # This is a collection of some of the settings we have found to be useful
 # in our DevStack development environments. Additional settings are described
-# in http://devstack.org/local.conf.html
+# in http://docs.openstack.org/developer/devstack/configuration.html#local-conf
 # These should be considered as samples and are unsupported DevStack code.
 
 # The ``localrc`` section replaces the old ``localrc`` configuration file.
diff --git a/samples/local.sh b/samples/local.sh
index 634f6dd..9cd0bdc 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -36,7 +36,7 @@
     # Add first keypair found in localhost:$HOME/.ssh
     for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
         if [[ -r $i ]]; then
-            nova keypair-add --pub_key=$i `hostname`
+            openstack keypair create --public-key $i `hostname`
             break
         fi
     done
@@ -53,8 +53,8 @@
     MI_NAME=m1.micro
 
     # Create micro flavor if not present
-    if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
-        nova flavor-create $MI_NAME 6 128 0 1
+    if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then
+        openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1
     fi
 
 
@@ -62,7 +62,7 @@
     # ----------
 
     # Add tcp/22 and icmp to default security group
-    nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
-    nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+    openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22
+    openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp
 
 fi
diff --git a/stack.sh b/stack.sh
index 6fbb0be..74edb10 100755
--- a/stack.sh
+++ b/stack.sh
@@ -27,6 +27,13 @@
 # Make sure custom grep options don't get in the way
 unset GREP_OPTIONS
 
+# Sanitize language settings to avoid commands bailing out
+# with "unsupported locale setting" errors.
+unset LANG
+unset LANGUAGE
+LC_ALL=C
+export LC_ALL
+
 # Make sure umask is sane
 umask 022
 
@@ -185,7 +192,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f22|f23|f24|rhel7|kvmibm1) ]]; then
+if [[ ! ${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|f25|rhel7|kvmibm1) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -562,6 +569,7 @@
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
@@ -570,6 +578,7 @@
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
 source $TOP_DIR/lib/dlm
+source $TOP_DIR/lib/os_brick
 
 # Extras Source
 # --------------
@@ -655,7 +664,6 @@
 # Rabbit connection info
 # In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit
 # isn't enabled.
-RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
 if is_service_enabled rabbit; then
     RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
@@ -796,6 +804,18 @@
     install_heatclient
 fi
 
+# Install shared libraries
+if is_service_enabled cinder nova; then
+    install_os_brick
+fi
+
+# Setup TLS certs
+if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
+    configure_CA
+    init_CA
+    init_cert
+fi
+
 # Install middleware
 install_keystonemiddleware
 
@@ -837,7 +857,6 @@
 if is_service_enabled neutron; then
     # Network service
     stack_install_service neutron
-    install_neutron_third_party
 fi
 
 if is_service_enabled nova; then
@@ -847,6 +866,13 @@
     configure_nova
 fi
 
+if is_service_enabled placement; then
+    # placement api
+    stack_install_service placement
+    cleanup_placement
+    configure_placement
+fi
+
 if is_service_enabled horizon; then
     # django openstack_auth
     install_django_openstack_auth
@@ -862,14 +888,9 @@
 fi
 
 if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
-    configure_CA
-    init_CA
-    init_cert
-    # Add name to ``/etc/hosts``.
-    # Don't be naive and add to existing line!
+    fix_system_ca_bundle_path
 fi
 
-
 # Extras Install
 # --------------
 
@@ -974,6 +995,10 @@
     fi
     screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
     screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
+
+    if is_service_enabled tls-proxy; then
+        follow_tls_proxy
+    fi
 fi
 
 # Clear ``screenrc`` file
@@ -1002,21 +1027,12 @@
 # Keystone
 # --------
 
-if is_service_enabled keystone; then
-    echo_summary "Starting Keystone"
-
-    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
-        init_keystone
-        start_keystone
-        bootstrap_keystone
-    fi
-
-    # Rather than just export these, we write them out to a
-    # intermediate userrc file that can also be used to debug if
-    # something goes wrong between here and running
-    # tools/create_userrc.sh (this script relies on services other
-    # than keystone being available, so we can't call it right now)
-    cat > $TOP_DIR/userrc_early <<EOF
+# Rather than just export these, we write them out to an
+# intermediate userrc file that can also be used to debug if
+# something goes wrong between here and running
+# tools/create_userrc.sh (this script relies on services other
+# than keystone being available, so we can't call it right now)
+cat > $TOP_DIR/userrc_early <<EOF
 # Use this for debugging issues before files in accrc are created
 
 # Set up password auth credentials now that Keystone is bootstrapped
@@ -1031,11 +1047,21 @@
 
 EOF
 
-    if is_service_enabled tls-proxy; then
-        echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
-    fi
+if is_service_enabled tls-proxy; then
+    echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
+    start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
+fi
 
-    source $TOP_DIR/userrc_early
+source $TOP_DIR/userrc_early
+
+if is_service_enabled keystone; then
+    echo_summary "Starting Keystone"
+
+    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+        init_keystone
+        start_keystone
+        bootstrap_keystone
+    fi
 
     create_keystone_accounts
     create_nova_accounts
@@ -1087,15 +1113,6 @@
     fi
 fi
 
-# Some Neutron plugins require network controllers which are not
-# a part of the OpenStack project. Configure and start them.
-if is_service_enabled neutron; then
-    configure_neutron_third_party
-    init_neutron_third_party
-    start_neutron_third_party
-fi
-
-
 # Nova
 # ----
 
@@ -1157,6 +1174,11 @@
     init_nova_cells
 fi
 
+if is_service_enabled placement; then
+    echo_summary "Configuring placement"
+    init_placement
+fi
+
 
 # Extras Configuration
 # ====================
@@ -1205,11 +1227,6 @@
 
     echo_summary "Uploading images"
 
-    # Option to upload legacy ami-tty, which works with xenserver
-    if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-        IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-    fi
-
     for image_url in ${IMAGE_URLS//,/ }; do
         upload_image $image_url
     done
@@ -1229,11 +1246,9 @@
 if is_service_enabled neutron-api; then
     echo_summary "Starting Neutron"
     start_neutron_api
-    # check_neutron_third_party_integration
 elif is_service_enabled q-svc; then
     echo_summary "Starting Neutron"
     start_neutron_service_and_check
-    check_neutron_third_party_integration
 elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
     NM_CONF=${NOVA_CONF}
     if is_service_enabled n-cell; then
@@ -1264,6 +1279,10 @@
     start_nova
     create_flavors
 fi
+if is_service_enabled placement; then
+    echo_summary "Starting Placement"
+    start_placement
+fi
 if is_service_enabled cinder; then
     echo_summary "Starting Cinder"
     start_cinder
@@ -1362,6 +1381,14 @@
 check_libs_from_git
 
 
+# Configure nova cellsv2
+# ----------------------
+
+# Do this late because it requires compute hosts to have started
+if is_service_enabled n-api && [ "$NOVA_CONFIGURE_CELLSV2" == "True" ]; then
+    create_cell
+fi
+
 # Bash completion
 # ===============
 
diff --git a/stackrc b/stackrc
index acb7d3f..b5018de 100644
--- a/stackrc
+++ b/stackrc
@@ -7,13 +7,6 @@
 [[ -z "$_DEVSTACK_STACKRC" ]] || return 0
 declare -r _DEVSTACK_STACKRC=1
 
-# Sanitize language settings to avoid commands bailing out
-# with "unsupported locale setting" errors.
-unset LANG
-unset LANGUAGE
-LC_ALL=C
-export LC_ALL
-
 # Find the other rc files
 RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 
@@ -51,30 +44,22 @@
 # Specify which services to launch.  These generally correspond to
 # screen tabs. To change the default list, use the ``enable_service`` and
 # ``disable_service`` functions in ``local.conf``.
-# For example, to enable Swift add this to ``local.conf``:
-#  enable_service s-proxy s-object s-container s-account
-# In order to enable Neutron (a single node setup) add the following
+# For example, to enable Swift as part of DevStack add the following
 # settings in ``local.conf``:
 #  [[local|localrc]]
-#  disable_service n-net
-#  enable_service q-svc
-#  enable_service q-agt
-#  enable_service q-dhcp
-#  enable_service q-l3
-#  enable_service q-meta
-#  # Optional, to enable tempest configuration as part of DevStack
-#  enable_service tempest
-
+#  enable_service s-proxy s-object s-container s-account
 # This allows us to pass ``ENABLED_SERVICES``
 if ! isset ENABLED_SERVICES ; then
     # Keystone - nothing works without keystone
     ENABLED_SERVICES=key
     # Nova - services to support libvirt based openstack clouds
-    ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-cauth
+    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth
     # Glance services needed for Nova
     ENABLED_SERVICES+=,g-api,g-reg
     # Cinder
     ENABLED_SERVICES+=,c-sch,c-api,c-vol
+    # Neutron
+    ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
     # Dashboard
     ENABLED_SERVICES+=,horizon
     # Additional services
@@ -264,10 +249,6 @@
 NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
 NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
 
-# neutron lbaas service
-NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git}
-NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master}
-
 # compute service
 NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
 NOVA_BRANCH=${NOVA_BRANCH:-master}
@@ -506,10 +487,19 @@
 GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git}
 GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master}
 
+# os-client-config to manage clouds.yaml and friends
+GITREPO["os-client-config"]=${OS_CLIENT_CONFIG_REPO:-${GIT_BASE}/openstack/os-client-config.git}
+GITBRANCH["os-client-config"]=${OS_CLIENT_CONFIG_BRANCH:-master}
+GITDIR["os-client-config"]=$DEST/os-client-config
+
 # os-vif library to communicate between Neutron to Nova
 GITREPO["os-vif"]=${OS_VIF_REPO:-${GIT_BASE}/openstack/os-vif.git}
 GITBRANCH["os-vif"]=${OS_VIF_BRANCH:-master}
 
+# osc-lib OpenStackClient common lib
+GITREPO["osc-lib"]=${OSC_LIB_REPO:-${GIT_BASE}/openstack/osc-lib.git}
+GITBRANCH["osc-lib"]=${OSC_LIB_BRANCH:-master}
+
 # ironic common lib
 GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git}
 GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master}
@@ -586,6 +576,12 @@
             LIBVIRT_GROUP=libvirtd
         fi
         ;;
+    lxd)
+        LXD_GROUP=${LXD_GROUP:-"lxd"}
+        ;;
+    docker)
+        DOCKER_GROUP=${DOCKER_GROUP:-"docker"}
+        ;;
     fake)
         NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
         ;;
@@ -710,6 +706,8 @@
 PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"}
 PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"}
 
+PUBLIC_INTERFACE=${PUBLIC_INTERFACE:-""}
+
 # Set default screen name
 SCREEN_NAME=${SCREEN_NAME:-stack}
 
@@ -760,7 +758,8 @@
 # Note that setting ``FIXED_RANGE`` may be necessary when running DevStack
 # in an OpenStack cloud that uses either of these address ranges internally.
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22}
+FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 HOST_IP_IFACE=${HOST_IP_IFACE:-}
 HOST_IP=${HOST_IP:-}
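
FIXED_RANGE now defaults from IPV4_ADDRS_SAFE_TO_USE, so overrides should prefer the new variable. A minimal local.conf sketch (the address range shown is illustrative):

    [[local|localrc]]
    # Preferred: FIXED_RANGE is derived from this value
    IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/22
    # Setting FIXED_RANGE directly still works and takes precedence
    #FIXED_RANGE=10.1.0.0/24
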
@@ -773,6 +772,9 @@
 
 HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6")
 
+# Whether or not the port_security extension should be enabled for Neutron.
+NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY)
+
 # SERVICE IP version
 # This is the IP version that services should be listening on, as well
 # as using to register their endpoints with keystone.
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index bb58088..fb55023 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -36,8 +36,8 @@
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
 ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
 ALL_LIBS+=" oslo.serialization django_openstack_auth"
-ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n"
-ALL_LIBS+=" oslo.utils python-swiftclient"
+ALL_LIBS+=" python-openstackclient osc-lib os-client-config oslo.rootwrap"
+ALL_LIBS+=" oslo.i18n oslo.utils python-swiftclient"
 ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy"
 ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service"
 ALL_LIBS+=" oslo.cache oslo.reports osprofiler"
diff --git a/tests/test_localconf.sh b/tests/test_localconf.sh
new file mode 100755
index 0000000..d8075df
--- /dev/null
+++ b/tests/test_localconf.sh
@@ -0,0 +1,475 @@
+#!/usr/bin/env bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied. See the License for the specific language governing
+# permissions and limitations under the License.
+
+# Tests for DevStack local.conf INI functions
+
+TOP=$(cd $(dirname "$0")/.. && pwd)
+
+# Import config functions
+source $TOP/inc/ini-config
+
+source $TOP/tests/unittest.sh
+
+echo "Testing INI local.conf functions"
+
+# Test that we can determine whether a file contains a given section in the
+# specified meta-section
+
+function test_localconf_has_section {
+    local file_localconf
+    local file_conf1
+    local file_conf2
+    file_localconf=`mktemp`
+    file_conf1=`mktemp`
+    file_conf2=`mktemp`
+
+    cat <<- EOF > $file_localconf
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+EOF
+
+    localconf_has_section $file_localconf post-config $file_conf1 conf1_t1
+    assert_equal $? 0
+    localconf_has_section $file_localconf post-config $file_conf1 conf1_t2
+    assert_equal $? 0
+    localconf_has_section $file_localconf post-config $file_conf1 conf1_t3
+    assert_equal $? 0
+    localconf_has_section $file_localconf post-extra $file_conf2 conf2_t1
+    assert_equal $? 0
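+    # Negative cases: a missing section, an absent meta-section, and a
+    # section looked up under [[local|localrc]] must all return non-zero.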
+    localconf_has_section $file_localconf post-config $file_conf1 conf1_t4
+    assert_equal $? 1
+    localconf_has_section $file_localconf post-install $file_conf1 conf1_t1
+    assert_equal $? 1
+    localconf_has_section $file_localconf local localrc conf1_t2
+    assert_equal $? 1
+    rm -f $file_localconf $file_conf1 $file_conf2
+}
+
+# Test that we can determine whether a file contains a given option in the
+# specified meta-section and section
+function test_localconf_has_option {
+    local file_localconf
+    local file_conf1
+    local file_conf2
+    file_localconf=`mktemp`
+    file_conf1=`mktemp`
+    file_conf2=`mktemp`
+    cat <<- EOF > $file_localconf
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1 = conf1_t1_val1
+conf1_t1_opt2 = conf1_t1_val2
+conf1_t1_opt3 = conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+EOF
+
+    localconf_has_option $file_localconf local localrc "" LOCALRC_VAR1
+    assert_equal $? 0
+    localconf_has_option $file_localconf local localrc "" LOCALRC_VAR2
+    assert_equal $? 0
+    localconf_has_option $file_localconf local localrc "" LOCALRC_VAR3
+    assert_equal $? 0
+    localconf_has_option $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1
+    assert_equal $? 0
+    localconf_has_option $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2
+    assert_equal $? 0
+    localconf_has_option $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3
+    assert_equal $? 0
+    localconf_has_option $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt2
+    assert_equal $? 0
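+    # Negative cases: options that do not exist under the given meta-section
+    # and section must not be found.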
+    localconf_has_option $file_localconf post-config $file_conf1 conf1_t1_opt4
+    assert_equal $? 1
+    localconf_has_option $file_localconf post-install $file_conf1 conf1_t1_opt1
+    assert_equal $? 1
+    localconf_has_option $file_localconf local localrc conf1_t2 conf1_t2_opt1
+    assert_equal $? 1
+    rm -f $file_localconf $file_conf1 $file_conf2
+}
+
+# Test updating an option in the specified meta-section and section
+function test_localconf_update_option {
+    local file_localconf
+    local file_localconf_expected
+    local file_conf1
+    local file_conf2
+    file_localconf=`mktemp`
+    file_localconf_expected=`mktemp`
+    file_conf1=`mktemp`
+    file_conf2=`mktemp`
+    cat <<- EOF > $file_localconf
+[[local|localrc]]
+LOCALRC_VAR1 = localrc_val1
+LOCALRC_VAR2 = localrc_val2
+LOCALRC_VAR3 = localrc_val3
+
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+EOF
+    cat <<- EOF > $file_localconf_expected
+[[local|localrc]]
+LOCALRC_VAR1 = localrc_val1
+LOCALRC_VAR2 = localrc_val2_update
+LOCALRC_VAR3 = localrc_val3
+
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1_update
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2_update
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3_update
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3_update
+EOF
+
+    localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update
+    localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt1 conf1_t1_val1_update
+    localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt2 conf1_t2_val2_update
+    localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt3 conf1_t3_val3_update
+    localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt3 conf2_t1_val3_update
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    localconf_update_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t3_opt1 conf1_t3_val1_update
+    localconf_update_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4_update
+    localconf_update_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt1 conf2_t1_val1_update
+    localconf_update_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4_update
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2
+}
+
+# Test adding an option in the specified meta-section and section
+function test_localconf_add_option {
+    local file_localconf
+    local file_localconf_expected
+    local file_conf1
+    local file_conf2
+    file_localconf=`mktemp`
+    file_localconf_expected=`mktemp`
+    file_conf1=`mktemp`
+    file_conf2=`mktemp`
+    cat <<- EOF > $file_localconf
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1 = conf2_t1_val1
+conf2_t1_opt2 = conf2_t1_val2
+conf2_t1_opt3 = conf2_t1_val3
+EOF
+    cat <<- EOF > $file_localconf_expected
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt4 = conf1_t1_val4
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt4 = conf1_t2_val4
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt4 = conf1_t3_val4
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[local|localrc]]
+LOCALRC_VAR4 = localrc_val4
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt4 = conf2_t1_val4
+conf2_t1_opt1 = conf2_t1_val1
+conf2_t1_opt2 = conf2_t1_val2
+conf2_t1_opt3 = conf2_t1_val3
+EOF
+
+    localconf_add_option "$SUDO" $file_localconf local localrc "" LOCALRC_VAR4 localrc_val4
+    localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t1 conf1_t1_opt4 conf1_t1_val4
+    localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t2 conf1_t2_opt4 conf1_t2_val4
+    localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t3 conf1_t3_opt4 conf1_t3_val4
+    localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    localconf_add_option "$SUDO" $file_localconf local localrc.conf "" LOCALRC_VAR4 localrc_val4_update
+    localconf_add_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1
+    localconf_add_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt4 conf2_t2_val4
+    localconf_add_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t2_val4
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2
+}
+
+# Test adding a section and option in the specified meta-section
+function test_localconf_add_section_and_option {
+    local file_localconf
+    local file_localconf_expected
+    local file_conf1
+    local file_conf2
+    file_localconf=`mktemp`
+    file_localconf_expected=`mktemp`
+    file_conf1=`mktemp`
+    file_conf2=`mktemp`
+    cat <<- EOF > $file_localconf
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+EOF
+    cat <<- EOF > $file_localconf_expected
+[[post-config|$file_conf1]]
+[conf1_t4]
+conf1_t4_opt1 = conf1_t4_val1
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t2]
+conf2_t2_opt1 = conf2_t2_val1
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+EOF
+
+    localconf_add_section_and_option "$SUDO" $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1
+    localconf_add_section_and_option "$SUDO" $file_localconf post-extra $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    localconf_add_section_and_option "$SUDO" $file_localconf post-install $file_conf2 conf2_t2 conf2_t2_opt1 conf2_t2_val1
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2
+}
+
+# Test that localconf_set updates an existing option, or adds the option
+# (creating the meta-section and section if necessary)
+function test_localconf_set {
+    local file_localconf
+    local file_localconf_expected
+    local file_conf1
+    local file_conf2
+    file_localconf=`mktemp`
+    file_localconf_expected=`mktemp`
+    file_conf1=`mktemp`
+    file_conf2=`mktemp`
+    cat <<- EOF > $file_localconf
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2
+LOCALRC_VAR3=localrc_val3
+
+[[post-config|$file_conf1]]
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+EOF
+    cat <<- EOF > $file_localconf_expected
+[[local|localrc]]
+LOCALRC_VAR1=localrc_val1
+LOCALRC_VAR2=localrc_val2_update
+LOCALRC_VAR3=localrc_val3
+
+[[post-config|$file_conf1]]
+[conf1_t4]
+conf1_t4_opt1 = conf1_t4_val1
+[conf1_t1]
+conf1_t1_opt1=conf1_t1_val1
+conf1_t1_opt2=conf1_t1_val2
+conf1_t1_opt3=conf1_t1_val3
+[conf1_t2]
+conf1_t2_opt1=conf1_t2_val1
+conf1_t2_opt2=conf1_t2_val2
+conf1_t2_opt3=conf1_t2_val3
+[conf1_t3]
+conf1_t3_opt1=conf1_t3_val1
+conf1_t3_opt2=conf1_t3_val2
+conf1_t3_opt3=conf1_t3_val3
+
+[[post-extra|$file_conf2]]
+[conf2_t1]
+conf2_t1_opt4 = conf2_t1_val4
+conf2_t1_opt1=conf2_t1_val1
+conf2_t1_opt2=conf2_t1_val2
+conf2_t1_opt3=conf2_t1_val3
+
+[[post-install|/etc/neutron/plugin/ml2/ml2_conf.ini]]
+[ml2]
+ml2_opt1 = ml2_val1
+EOF
+
+    if [[ -n "$SUDO" ]]; then
+        SUDO_ARG="-sudo"
+    else
+        SUDO_ARG=""
+    fi
+    localconf_set $SUDO_ARG $file_localconf post-install /etc/neutron/plugin/ml2/ml2_conf.ini ml2 ml2_opt1 ml2_val1
+    localconf_set $SUDO_ARG $file_localconf local localrc "" LOCALRC_VAR2 localrc_val2_update
+    localconf_set $SUDO_ARG $file_localconf post-config $file_conf1 conf1_t4 conf1_t4_opt1 conf1_t4_val1
+    localconf_set $SUDO_ARG $file_localconf post-extra $file_conf2 conf2_t1 conf2_t1_opt4 conf2_t1_val4
+    result=`cat $file_localconf`
+    result_expected=`cat $file_localconf_expected`
+    assert_equal "$result" "$result_expected"
+    rm -f $file_localconf $file_localconf_expected $file_conf1 $file_conf2
+}
+
+
+test_localconf_has_section
+test_localconf_has_option
+test_localconf_update_option
+test_localconf_add_option
+test_localconf_add_section_and_option
+test_localconf_set
diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh
index 327fb56..92f9c01 100755
--- a/tests/test_meta_config.sh
+++ b/tests/test_meta_config.sh
@@ -125,6 +125,14 @@
 [[test10|does-not-exist-dir/test.conf]]
 foo=bar
 
+[[test11|test-same.conf]]
+[DEFAULT]
+foo=bar
+
+[[test11|test-same.conf]]
+[some]
+random=config
+
 [[test-multi-sections|test-multi-sections.conf]]
 [sec-1]
 cfg_item1 = abcd
@@ -147,6 +155,9 @@
 cfg_item2 = efgh
 cfg_item2 = \${FOO_BAR_BAZ}
 
+[[test11|test-same.conf]]
+[another]
+non = sense
 EOF
 
 echo -n "get_meta_section_files: test0 doesn't exist: "
@@ -385,8 +396,24 @@
 check_result "$VAL" "$EXPECT_VAL"
 set -e
 
+echo -n "merge_config_file test11 same section: "
+rm -f test-same.conf
+merge_config_group test.conf test11
+VAL=$(cat test-same.conf)
+EXPECT_VAL='
+[DEFAULT]
+foo = bar
+
+[some]
+random = config
+
+[another]
+non = sense'
+check_result "$VAL" "$EXPECT_VAL"
+
+
 rm -f test.conf test1c.conf test2a.conf \
     test-space.conf test-equals.conf test-strip.conf \
     test-colon.conf test-env.conf test-multiline.conf \
-    test-multi-sections.conf
+    test-multi-sections.conf test-same.conf
 rm -rf test-etc
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index b6db5d1..30d1a01 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -193,7 +193,6 @@
 export OS_AUTH_URL="$OS_AUTH_URL"
 export OS_CACERT="$OS_CACERT"
 export NOVA_CERT="$ACCOUNT_DIR/cacert.pem"
-export OS_AUTH_TYPE=v2password
 EOF
     if [ -n "$ADDPASS" ]; then
         echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index 193a1f7..4dec95e 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -162,7 +162,11 @@
 fi
 
 # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
-# connection issues under proxy, hence uninstalling python-virtualenv package
-# and installing the latest version using pip.
-uninstall_package python-virtualenv
-pip_install -U virtualenv
+# connection issues under a proxy, so re-install the latest version
+# using pip. To avoid having pip's virtualenv overwritten by the
+# distro's package (e.g. due to installing a distro package with a
+# dependency on python-virtualenv), first install the distro
+# python-virtualenv to satisfy any dependencies, then use pip to
+# overwrite it.
+
+install_package python-virtualenv
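+# --force-reinstall makes pip replace the already-satisfied distro copy of
+# virtualenv with the latest release from PyPI.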
+pip_install -U --force-reinstall virtualenv
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index bbad1bf..56f12e7 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -46,6 +46,9 @@
 
 # Check if this project has a plugin file
 def has_devstack_plugin(proj):
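+    # A repo counts as a DevStack plugin if it ships devstack/plugin.sh;
+    # only those repos end up in the generated plugins list.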
+    # Don't link in the deb packaging repos
+    if "openstack/deb-" in proj:
+        return False
     r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
     return r.status_code == 200
 
diff --git a/tools/info.sh b/tools/info.sh
index c056fa7..282667f 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -8,7 +8,7 @@
 # Output types are git,localrc,os,pip,pkg:
 #
 #   git|<project>|<branch>[<shaq>]
-#   localtc|<var>=<value>
+#   localrc|<var>=<value>
 #   os|<var>=<value>
 #   pip|<package>|<version>
 #   pkg|<package>|<version>
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 1267699..a5ccb19 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -24,7 +24,20 @@
 
 FILES=$TOP_DIR/files
 
-PIP_GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
+# The URL from which the get-pip.py file is downloaded. If a local
+# get-pip.py mirror is available, PIP_GET_PIP_URL can be set to that
+# mirror in local.conf to avoid download timeouts.
+# Example:
+#  PIP_GET_PIP_URL="http://local-server/get-pip.py"
+#
+# Note that if get-pip.py already exists in $FILES, this script will
+# not re-download or check for a new version.  For example, this is
+# done by openstack-infra diskimage-builder elements as part of image
+# preparation [1].  This prevents any network access, which can be
+# unreliable in CI situations.
+# [1] http://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/cache-devstack/source-repository-pip
+
+PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"}
 LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
 
 GetDistro
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index 2628b40..e91464f 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -45,6 +45,7 @@
 
 # Make sure the CA is set up
 configure_CA
+fix_system_ca_bundle_path
 init_CA
 
 # Create the server cert
diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh
index dba7502..73fe3f3 100755
--- a/tools/ping_neutron.sh
+++ b/tools/ping_neutron.sh
@@ -54,7 +54,7 @@
 REMAINING_ARGS="${@:2}"
 
 # BUG: with duplicate network names, this fails pretty hard.
-NET_ID=$(neutron net-list | grep "$NET_NAME" | awk '{print $2}')
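+# "openstack network show -f value -c id" prints just the network UUID, so
+# no grep/awk parsing of table output is needed.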
+NET_ID=$(openstack network show -f value -c id "$NET_NAME")
 PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1)
 
 # This runs a command inside the specific netns
diff --git a/tools/xen/functions b/tools/xen/functions
index cf14568..e1864eb 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -305,3 +305,25 @@
 
     xe vm-list name-label="$vm_name_label" params=dom-id minimal=true
 }
+
+function install_conntrack_tools {
+    local xs_host
+    local xs_ver_major
+    local centos_ver
+    local conntrack_conf
+    xs_host=$(xe host-list --minimal)
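+    # Extract the XenServer major version (e.g. "7" from "7.0") so the
+    # conntrack-tools install can be gated on XS 7.0+.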
+    xs_ver_major=$(xe host-param-get uuid=$xs_host param-name=software-version param-key=product_version_text_short | cut -d'.' -f 1)
+    if [ $xs_ver_major -gt 6 ]; then
+        # Only support conntrack-tools in Dom0 with XS7.0 and above
+        if [ ! -f /usr/sbin/conntrackd ]; then
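+            # Enable the Base repo's baseurl (shipped commented out) and
+            # install conntrack-tools for the running CentOS release.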
+            sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo
+            centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'.' -f1-2 | tr '-' '.')
+            yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools
+            # Back up the stock conntrackd.conf after installing conntrack-tools
+            # and use the sample configuration for statistics mode instead
+            mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back
+            conntrack_conf=$(find /usr/share/doc -name conntrackd.conf |grep stats)
+            cp $conntrack_conf /etc/conntrackd/conntrackd.conf
+        fi
+        service conntrackd restart
+    fi
+}
diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh
index 3a61215..66b9eda 100755
--- a/tools/xen/install_os_domU.sh
+++ b/tools/xen/install_os_domU.sh
@@ -247,7 +247,7 @@
 fi
 
 if [ -n "${EXIT_AFTER_JEOS_INSTALLATION:-}" ]; then
-    echo "User requested to quit after JEOS instalation"
+    echo "User requested to quit after JEOS installation"
     exit 0
 fi
 
diff --git a/unstack.sh b/unstack.sh
index a69b218..c05d1f0 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -63,6 +63,7 @@
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
@@ -96,11 +97,6 @@
 # Phase: unstack
 run_phase unstack
 
-if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
-    source $TOP_DIR/openrc
-    teardown_neutron_debug
-fi
-
 # Call service stop
 
 if is_service_enabled heat; then
@@ -111,6 +107,10 @@
     stop_nova
 fi
 
+if is_service_enabled placement; then
+    stop_placement
+fi
+
 if is_service_enabled glance; then
     stop_glance
 fi
@@ -168,7 +168,6 @@
 
 if is_service_enabled neutron; then
     stop_neutron
-    stop_neutron_third_party
     cleanup_neutron
 fi
 
@@ -185,11 +184,13 @@
     fi
 fi
 
-# BUG: maybe it doesn't exist? We should isolate this further down.
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
-# enabled backends. So if Cinder is enabled, we are sure lvm (lvremove,
-# /etc/lvm/lvm.conf, etc.) is here.
-if is_service_enabled cinder; then
+# enabled backends. So if Cinder is enabled and installed successfully, we are
+# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is present.
+if is_service_enabled cinder && is_package_installed lvm2; then
+    # Using /bin/true here indicates a BUG - maybe the
+    # DEFAULT_VOLUME_GROUP_NAME doesn't exist?  We should
+    # isolate this further down in lib/cinder cleanup.
     clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
     clean_lvm_filter
 fi