Merge "worlddump: Add cinder-volume guru meditation report"
diff --git a/.gitignore b/.gitignore
index a470ff5..d1781bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,8 @@
 files/*.vmdk
 files/*.rpm
 files/*.rpm.*
+files/*.deb
+files/*.deb.*
 files/*.qcow2
 files/*.img
 files/images
diff --git a/HACKING.rst b/HACKING.rst
index d763c75..b76cb6c 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -219,7 +219,7 @@
     set -o errexit
 
     # Print the commands being run so that we can see the command that triggers
-    # an error.  It is also useful for following allowing as the install occurs.
+    # an error.  It is also useful for following along as the install occurs.
     set -o xtrace
 
 * Settings and configuration are stored in ``exerciserc``, which must be
diff --git a/README.md b/README.md
index 4ba4619..ff5598b 100644
--- a/README.md
+++ b/README.md
@@ -25,9 +25,9 @@
 The DevStack master branch generally points to trunk versions of OpenStack
 components.  For older, stable versions, look for branches named
 stable/[release] in the DevStack repo.  For example, you can do the
-following to create a juno OpenStack cloud:
+following to create a Newton OpenStack cloud:
 
-    git checkout stable/juno
+    git checkout stable/newton
     ./stack.sh
 
 You can also pick specific OpenStack project releases by setting the appropriate
diff --git a/clean.sh b/clean.sh
index 452df02..d92807c 100755
--- a/clean.sh
+++ b/clean.sh
@@ -46,6 +46,7 @@
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
@@ -146,3 +147,8 @@
 done
 
 rm -rf ~/.config/openstack
+
+# Clean up all *.pyc files
+if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then
+    sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm
+fi
diff --git a/data/devstack-plugins-registry.header b/data/devstack-plugins-registry.header
index 6119ab5..576dbbd 100644
--- a/data/devstack-plugins-registry.header
+++ b/data/devstack-plugins-registry.header
@@ -1,18 +1,16 @@
-..
+.. Note to patch submitters:
 
-  Note to patch submitters:
+   # ============================= #
+   # THIS FILE IS AUTOGENERATED !  #
+   # ============================= #
 
-  # ============================= #
-  # THIS FILE IS AUTOGENERATED !  #
-  # ============================= #
+   ** Plugins are found automatically and added to this list **
 
-  ** Plugins are found automatically and added to this list **
+   This file is created by a periodic proposal job.  You should not
+   edit this file.
 
-  This file is created by a periodic proposal job.  You should not
-  edit this file.
-
-  You should edit the files data/devstack-plugins-registry.footer
-  data/devstack-plugins-registry.header to modify this text.
+   You should edit the files data/devstack-plugins-registry.footer
+   data/devstack-plugins-registry.header to modify this text.
 
 ==========================
  DevStack Plugin Registry
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1161b34..53ae82f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -63,7 +63,7 @@
 ::
 
     [[local|localrc]]
-    FIXED_RANGE=10.254.1.0/24
+    IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24
     ADMIN_PASSWORD=speciale
     LOGFILE=$DEST/logs/stack.sh.log
 
@@ -161,8 +161,8 @@
 
 -  no logging
 -  pre-set the passwords to prevent interactive prompts
--  move network ranges away from the local network (``FIXED_RANGE`` and
-   ``FLOATING_RANGE``, commented out below)
+-  move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE``
+   and ``FLOATING_RANGE``, commented out below)
 -  set the host IP if detection is unreliable (``HOST_IP``, commented
    out below)
 
@@ -173,7 +173,7 @@
     DATABASE_PASSWORD=$ADMIN_PASSWORD
     RABBIT_PASSWORD=$ADMIN_PASSWORD
     SERVICE_PASSWORD=$ADMIN_PASSWORD
-    #FIXED_RANGE=172.31.1.0/24
+    #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24
     #FLOATING_RANGE=192.168.20.0/25
     #HOST_IP=10.3.4.5
 
@@ -521,16 +521,14 @@
 IP Version
 ----------
 
-``IP_VERSION`` can be used to configure DevStack to create either an
-IPv4, IPv6, or dual-stack self service project data-network by with
+``IP_VERSION`` can be used to configure Neutron to create either an
+IPv4, IPv6, or dual-stack self-service project data-network with
 either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
-respectively.  This functionality requires that the Neutron networking
-service is enabled by setting the following options:
+respectively.
 
     ::
 
-        disable_service n-net
-        enable_service q-svc q-agt q-dhcp q-l3
+        IP_VERSION=4+6
 
 The following optional variables can be used to alter the default IPv6
 behavior:
@@ -539,12 +537,12 @@
 
         IPV6_RA_MODE=slaac
         IPV6_ADDRESS_MODE=slaac
-        FIXED_RANGE_V6=fd$IPV6_GLOBAL_ID::/64
+        IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
         IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
 
-*Note*: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be
-configured with any valid IPv6 prefix. The default values make use of
-an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
+*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
+can be configured with any valid IPv6 prefix. The default values make
+use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
 
 Service Version
 ~~~~~~~~~~~~~~~
@@ -705,13 +703,13 @@
 ~~~~~~
 
 The logical volume group used to hold the Cinder-managed volumes is
-set by ``VOLUME_GROUP``, the logical volume name prefix is set with
+set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with
 ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
 with ``VOLUME_BACKING_FILE_SIZE``.
 
     ::
 
-        VOLUME_GROUP="stack-volumes"
+        VOLUME_GROUP_NAME="stack-volumes"
         VOLUME_NAME_PREFIX="volume-"
         VOLUME_BACKING_FILE_SIZE=10250M
 
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 0c439ad..21bea99 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -66,21 +66,21 @@
     ./stack.sh
     . ./openrc
 
-    neutron net-list  # should show public and private networks
+    openstack network list  # should show public and private networks
 
 Create two nova instances that we can use as test http servers:
 
   ::
 
     #create nova instances on private network
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
     nova list # should show the nova instances just created
 
     #add secgroup rules to allow ssh etc..
-    neutron security-group-rule-create default --protocol icmp
-    neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22
-    neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80
+    openstack security group rule create default --protocol icmp
+    openstack security group rule create default --protocol tcp --dst-port 22:22
+    openstack security group rule create default --protocol tcp --dst-port 80:80
 
 Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
 
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index c996f95..dfc9936 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -260,7 +260,7 @@
     openstack user create $NAME --password=$PASSWORD --project $PROJECT
     openstack role add Member --user $NAME --project $PROJECT
     # The Member role is created by stack.sh
-    # openstack role list
+    # openstack role assignment list
 
 Swift
 -----
@@ -294,10 +294,10 @@
 
 ``stack-volumes`` can be pre-created on any physical volume supported by
 Linux's LVM. The name of the volume group can be changed by setting
-``VOLUME_GROUP`` in ``localrc``. ``stack.sh`` deletes all logical
-volumes in ``VOLUME_GROUP`` that begin with ``VOLUME_NAME_PREFIX`` as
+``VOLUME_GROUP_NAME`` in ``localrc``. ``stack.sh`` deletes all logical
+volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as
 part of cleaning up from previous runs. It is recommended to not use the
-root volume group as ``VOLUME_GROUP``.
+root volume group as ``VOLUME_GROUP_NAME``.
 
 The details of creating the volume group depends on the server hardware
 involved but looks something like this:
@@ -400,6 +400,10 @@
 
         ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts
 
+3. Verify that login via ssh works without a password::
+
+        ssh -i /root/.ssh/id_rsa stack@DEST_HOSTNAME
+
 In essence, this means that every compute node's root user's public RSA key
 must exist in every other compute node's stack user's authorized_keys file and
 every compute node's public ECDSA key needs to be in every other compute
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index c5b1634..092809a 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -76,16 +76,10 @@
         RABBIT_PASSWORD=secret
         SERVICE_PASSWORD=secret
 
-        # Do not use Nova-Network
-        disable_service n-net
-        # Enable Neutron
-        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
-
-
         ## Neutron options
         Q_USE_SECGROUP=True
         FLOATING_RANGE="172.18.161.0/24"
-        FIXED_RANGE="10.0.0.0/24"
+        IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22"
         Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
         PUBLIC_NETWORK_GATEWAY="172.18.161.1"
         PUBLIC_INTERFACE=eth0
@@ -389,24 +383,21 @@
 
         Q_USE_PROVIDER_NETWORKING=True
 
-        # Do not use Nova-Network
-        disable_service n-net
-
-        # Neutron
-        ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt
+        disable_service q-l3
 
         ## Neutron Networking options used to create Neutron Subnets
 
-        FIXED_RANGE="203.0.113.0/24"
+        IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
         NETWORK_GATEWAY=203.0.113.1
         PROVIDER_SUBNET_NAME="provider_net"
         PROVIDER_NETWORK_TYPE="vlan"
         SEGMENTATION_ID=2010
+        USE_SUBNETPOOL=False
 
-In this configuration we are defining FIXED_RANGE to be a
+In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
 publicly routed IPv4 subnet. In this specific instance we are using
 the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
-which is used for documentation.  In your DevStack setup, FIXED_RANGE
+which is used for documentation.  In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE
 would be a public IP address range that you or your organization has
 allocated to you, so that you could access your instances from the
 public internet.
@@ -530,16 +521,10 @@
     RABBIT_PASSWORD=secret
     SERVICE_PASSWORD=secret
 
-    # Do not use Nova-Network
-    disable_service n-net
-    # Enable Neutron
-    ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3
-
-
     ## Neutron options
     Q_USE_SECGROUP=True
     FLOATING_RANGE="172.18.161.0/24"
-    FIXED_RANGE="10.0.0.0/24"
+    IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24"
     Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
     PUBLIC_NETWORK_GATEWAY="172.18.161.1"
     PUBLIC_INTERFACE=eth0
@@ -582,20 +567,18 @@
     Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
     Q_USE_PROVIDER_NETWORKING=True
 
-    #Enable Neutron services
-    disable_service n-net
     enable_plugin neutron git://git.openstack.org/openstack/neutron
-    ENABLED_SERVICES+=,q-agt,q-svc
 
     ## MacVTap agent options
     Q_AGENT=macvtap
     PHYSICAL_NETWORK=default
 
-    FIXED_RANGE="203.0.113.0/24"
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
     NETWORK_GATEWAY=203.0.113.1
     PROVIDER_SUBNET_NAME="provider_net"
     PROVIDER_NETWORK_TYPE="vlan"
     SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
 
     [[post-config|/$Q_PLUGIN_CONF_FILE]]
     [macvtap]
@@ -614,7 +597,7 @@
 
 For OVS, a similar configuration like described in the
 :ref:`OVS Provider Network <ovs-provider-network-controller>` section can be
-used. Just add the the following line to this local.conf, which also loads
+used. Just add the following line to this local.conf, which also loads
 the MacVTap mechanism driver:
 
 ::
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 435011b..b8dd506 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -45,31 +45,6 @@
 If you do not have a preference, Ubuntu 16.04 is the most tested, and
 will probably go the smoothest.
 
-Download DevStack
------------------
-
-::
-
-   git clone https://git.openstack.org/openstack-dev/devstack
-
-The ``devstack`` repo contains a script that installs OpenStack and
-templates for configuration files
-
-Create a local.conf
--------------------
-
-Create a ``local.conf`` file with 4 passwords preset
-
-::
-
-   [[local|localrc]]
-   ADMIN_PASSWORD=secret
-   DATABASE_PASSWORD=$ADMIN_PASSWORD
-   RABBIT_PASSWORD=$ADMIN_PASSWORD
-   SERVICE_PASSWORD=$ADMIN_PASSWORD
-
-This is the minimum required config to get started with DevStack.
-
 Add Stack User
 --------------
 
@@ -81,14 +56,48 @@
 
 ::
 
-   devstack/tools/create-stack-user.sh; su stack
+   $ adduser stack
+
+Since this user will be making many changes to your system, it should
+have sudo privileges:
+
+::
+
+    $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+    $ su stack
+
+Download DevStack
+-----------------
+
+::
+
+   $ git clone https://git.openstack.org/openstack-dev/devstack
+   $ cd devstack
+
+The ``devstack`` repo contains a script that installs OpenStack and
+templates for configuration files.
+
+Create a local.conf
+-------------------
+
+Create a ``local.conf`` file with 4 passwords preset at the root of the
+devstack git repo.
+::
+
+   [[local|localrc]]
+   ADMIN_PASSWORD=secret
+   DATABASE_PASSWORD=$ADMIN_PASSWORD
+   RABBIT_PASSWORD=$ADMIN_PASSWORD
+   SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+This is the minimum required config to get started with DevStack.
 
 Start the install
 -----------------
 
 ::
 
-   cd devstack; ./stack.sh
+   ./stack.sh
 
 This will take a 15 - 20 minutes, largely depending on the speed of
 your internet connection. Many git trees and packages will be
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index 1d56c33..bdbeaaa 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -4,7 +4,7 @@
 
 An important part of the DevStack experience is networking that works
 by default for created guests. This might not be optimal for your
-particular testing environment, so this document tries it's best to
+particular testing environment, so this document tries its best to
 explain what's going on.
 
 Defaults
@@ -15,10 +15,11 @@
 * neutron (including l3 with openvswitch)
 * private project networks for each openstack project
 * a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1
-* the demo project configured with fixed ips on 10.0.0.0/24
-* a ``br-ex`` interface controlled by neutron for all it's networking
+* the demo project configured with fixed ips on a subnet allocated from
+  the 10.0.0.0/22 range
+* a ``br-ex`` interface controlled by neutron for all its networking
   (this is not connected to any physical interfaces).
-* DNS resolution for guests based on the resolv.conf for you host
+* DNS resolution for guests based on the resolv.conf for your host
 * an ip masq rule that allows created guests to route out
 
 This creates an environment which is isolated to the single
@@ -39,7 +40,7 @@
 Locally Accessible Guests
 =========================
 
-If you want to make you guests accessible other machines on your
+If you want to make your guests accessible from other machines on your
 network, we have to connect ``br-ex`` to a physical interface.
 
 Dedicated Guest Interface
@@ -95,3 +96,21 @@
 your existing network, you'll want to give it a slice that your local
 dhcp server is not allocating. Otherwise you could easily have
 conflicting ip addresses, and cause havoc with your local network.
+
+
+Private Network Addressing
+==========================
+
+The private network addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE``
+and ``IPV6_ADDRS_SAFE_TO_USE`` variables. This allows users to specify a single
+variable of safe internal IPs to use that will be referenced whether or not
+subnetpools are in use.
+
+For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` will just default to
+the value of ``IPV4_ADDRS_SAFE_TO_USE`` directly.
+
+For IPv6, ``FIXED_RANGE_V6`` will default to the first /64 of the value of
+``IPV6_ADDRS_SAFE_TO_USE``. If ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller,
+``FIXED_RANGE_V6`` will just use that value directly.
+``SUBNETPOOL_PREFIX_V6`` will just default to the value of
+``IPV6_ADDRS_SAFE_TO_USE`` directly.
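
As a quick illustration of the defaulting behavior described above (the
addresses here are placeholders, not values introduced by this change), a
``local.conf`` could set just the two safe ranges and let the derived
variables follow::

    [[local|localrc]]
    # FIXED_RANGE and SUBNETPOOL_PREFIX_V4 default to this value directly
    IPV4_ADDRS_SAFE_TO_USE=10.1.0.0/22
    # FIXED_RANGE_V6 defaults to the first /64 of this prefix;
    # SUBNETPOOL_PREFIX_V6 defaults to the prefix itself
    IPV6_ADDRS_SAFE_TO_USE=fd12:3456:789a::/56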
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index bb85270..6ece997 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -1,18 +1,16 @@
-..
+.. Note to patch submitters:
 
-  Note to patch submitters:
+   # ============================= #
+   # THIS FILE IS AUTOGENERATED !  #
+   # ============================= #
 
-  # ============================= #
-  # THIS FILE IS AUTOGENERATED !  #
-  # ============================= #
+   ** Plugins are found automatically and added to this list **
 
-  ** Plugins are found automatically and added to this list **
+   This file is created by a periodic proposal job.  You should not
+   edit this file.
 
-  This file is created by a periodic proposal job.  You should not
-  edit this file.
-
-  You should edit the files data/devstack-plugins-registry.footer
-  data/devstack-plugins-registry.header to modify this text.
+   You should edit the files data/devstack-plugins-registry.footer
+   data/devstack-plugins-registry.header to modify this text.
 
 ==========================
  DevStack Plugin Registry
@@ -26,6 +24,7 @@
 ====================================== ===
 Plugin Name                            URL
 ====================================== ===
+almanach                               `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
 aodh                                   `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
 app-catalog-ui                         `git://git.openstack.org/openstack/app-catalog-ui <https://git.openstack.org/cgit/openstack/app-catalog-ui>`__
 astara                                 `git://git.openstack.org/openstack/astara <https://git.openstack.org/cgit/openstack/astara>`__
@@ -60,21 +59,26 @@
 freezer-api                            `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
 freezer-web-ui                         `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
 gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
+glare                                  `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
 gnocchi                                `git://git.openstack.org/openstack/gnocchi <https://git.openstack.org/cgit/openstack/gnocchi>`__
 group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
 heat                                   `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
-higgins                                `git://git.openstack.org/openstack/higgins <https://git.openstack.org/cgit/openstack/higgins>`__
 horizon-mellanox                       `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
 ironic                                 `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
 ironic-inspector                       `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
 ironic-staging-drivers                 `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
+karbor                                 `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
+karbor-dashboard                       `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
+keystone                               `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
 kingbird                               `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
+kuryr-kubernetes                       `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
 kuryr-libnetwork                       `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
 magnum                                 `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
 magnum-ui                              `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
 manila                                 `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
 masakari                               `git://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
 mistral                                `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
+mixmatch                               `git://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
 monasca-analytics                      `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
 monasca-api                            `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
 monasca-ceilometer                     `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
@@ -82,6 +86,7 @@
 monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
 murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
 networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
+networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
 networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
 networking-bgpvpn                      `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
 networking-brocade                     `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
@@ -103,6 +108,7 @@
 networking-plumgrid                    `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
 networking-powervm                     `git://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
 networking-sfc                         `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
+networking-vpp                         `git://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
 networking-vsphere                     `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
 neutron                                `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
 neutron-dynamic-routing                `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
@@ -110,13 +116,15 @@
 neutron-lbaas                          `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
 neutron-lbaas-dashboard                `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
 neutron-vpnaas                         `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
+nimble                                 `git://git.openstack.org/openstack/nimble <https://git.openstack.org/cgit/openstack/nimble>`__
 nova-docker                            `git://git.openstack.org/openstack/nova-docker <https://git.openstack.org/cgit/openstack/nova-docker>`__
 nova-lxd                               `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
+nova-mksproxy                          `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
 nova-powervm                           `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
+oaktree                                `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
 octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
 osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
 panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
-python-freezerclient                   `git://git.openstack.org/openstack/python-freezerclient <https://git.openstack.org/cgit/openstack/python-freezerclient>`__
 rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
 sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
 sahara-dashboard                       `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
@@ -124,12 +132,11 @@
 searchlight                            `git://git.openstack.org/openstack/searchlight <https://git.openstack.org/cgit/openstack/searchlight>`__
 searchlight-ui                         `git://git.openstack.org/openstack/searchlight-ui <https://git.openstack.org/cgit/openstack/searchlight-ui>`__
 senlin                                 `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
-smaug                                  `git://git.openstack.org/openstack/smaug <https://git.openstack.org/cgit/openstack/smaug>`__
-smaug-dashboard                        `git://git.openstack.org/openstack/smaug-dashboard <https://git.openstack.org/cgit/openstack/smaug-dashboard>`__
 solum                                  `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
 tacker                                 `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
 tap-as-a-service                       `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
 tricircle                              `git://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
+trio2o                                 `git://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
 trove                                  `git://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
 trove-dashboard                        `git://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
 vitrage                                `git://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
@@ -139,6 +146,8 @@
 watcher-dashboard                      `git://git.openstack.org/openstack/watcher-dashboard <https://git.openstack.org/cgit/openstack/watcher-dashboard>`__
 zaqar                                  `git://git.openstack.org/openstack/zaqar <https://git.openstack.org/cgit/openstack/zaqar>`__
 zaqar-ui                               `git://git.openstack.org/openstack/zaqar-ui <https://git.openstack.org/cgit/openstack/zaqar-ui>`__
+zun                                    `git://git.openstack.org/openstack/zun <https://git.openstack.org/cgit/openstack/zun>`__
+zun-ui                                 `git://git.openstack.org/openstack/zun-ui <https://git.openstack.org/cgit/openstack/zun-ui>`__
 ====================================== ===
 
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 70469d6..31987bc 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -99,7 +99,7 @@
       should exist at this point.
    -  **extra** - Called near the end after layer 1 and 2 services have
       been started.
-   - **test-config** Called at the end of devstack used to configure tempest
+   -  **test-config** - Called at the end of devstack, used to configure tempest
       or any other test environments
 
 -  **unstack** - Called by ``unstack.sh`` before other services are shut
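
A plugin wanting to hook the new **test-config** phase dispatches on it the
same way the in-tree ``extras.d`` scripts do; a minimal ``plugin.sh`` sketch
(``my_configure_tests`` is a hypothetical helper)::

    if [[ "$1" == "stack" && "$2" == "test-config" ]]; then
        # runs at the very end of stack.sh, after all services and plugins
        my_configure_tests
    fi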
diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh
index 808ef76..8cbca54 100755
--- a/exercises/aggregates.sh
+++ b/exercises/aggregates.sh
@@ -20,7 +20,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh
index 84ac08f..7478bdf 100755
--- a/exercises/boot_from_volume.sh
+++ b/exercises/boot_from_volume.sh
@@ -16,7 +16,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/client-args.sh b/exercises/client-args.sh
index 2c8fe81..b380968 100755
--- a/exercises/client-args.sh
+++ b/exercises/client-args.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/client-env.sh b/exercises/client-env.sh
index 6ab4d08..fff04df 100755
--- a/exercises/client-env.sh
+++ b/exercises/client-env.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh
index 485208b..5abc713 100755
--- a/exercises/floating_ips.sh
+++ b/exercises/floating_ips.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index 8115006..e8c8f62 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -20,7 +20,7 @@
 set -o errtrace
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 # Environment
@@ -148,7 +148,7 @@
 function get_role_id {
     local ROLE_NAME=$1
     local ROLE_ID
-    ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+    ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'`
     die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
     echo "$ROLE_ID"
 }
@@ -156,7 +156,7 @@
 function get_network_id {
     local NETWORK_NAME="$1"
     local NETWORK_ID
-    NETWORK_ID=`neutron net-list -F id  -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+    NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME`
     echo $NETWORK_ID
 }
 
@@ -234,9 +234,9 @@
     PROJECT_ID=$(get_project_id $PROJECT)
     source $TOP_DIR/openrc $PROJECT $PROJECT
     local NET_ID
-    NET_ID=$(neutron net-create --project-id $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+    NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA"
-    neutron subnet-create --ip-version 4 --project-id $PROJECT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR
+    openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet"
     neutron_debug_admin probe-create --device-owner compute $NET_ID
     source $TOP_DIR/openrc demo demo
 }
@@ -325,10 +325,10 @@
     PROJECT_ID=$(get_project_id $PROJECT)
     #TODO(nati) comment out until l3-agent merged
     #for res in port subnet net router;do
-    for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
+    for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do
         delete_probe $net_id
-        neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete %
-        neutron net-delete $net_id
+        openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete %
+        openstack network delete $net_id
     done
     source $TOP_DIR/openrc demo demo
 }
diff --git a/exercises/sec_groups.sh b/exercises/sec_groups.sh
index 5f8b0a4..2f78e39 100755
--- a/exercises/sec_groups.sh
+++ b/exercises/sec_groups.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/swift.sh b/exercises/swift.sh
index 4a41e0f..8aa376b 100755
--- a/exercises/swift.sh
+++ b/exercises/swift.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/exercises/volumes.sh b/exercises/volumes.sh
index 0de1226..e7c3560 100755
--- a/exercises/volumes.sh
+++ b/exercises/volumes.sh
@@ -13,7 +13,7 @@
 set -o errexit
 
 # Print the commands being run so that we can see the command that triggers
-# an error.  It is also useful for following allowing as the install occurs.
+# an error.  It is also useful for following along as the install occurs.
 set -o xtrace
 
 
diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh
deleted file mode 100644
index cc90128..0000000
--- a/extras.d/60-ceph.sh
+++ /dev/null
@@ -1,75 +0,0 @@
-# ceph.sh - DevStack extras script to install Ceph
-
-if is_service_enabled ceph; then
-    if [[ "$1" == "source" ]]; then
-        # Initial source
-        source $TOP_DIR/lib/ceph
-    elif [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
-        echo_summary "Installing Ceph"
-        check_os_support_ceph
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            install_ceph
-            echo_summary "Configuring Ceph"
-            configure_ceph
-            # NOTE (leseb): Do everything here because we need to have Ceph started before the main
-            # OpenStack components. Ceph OSD must start here otherwise we can't upload any images.
-            echo_summary "Initializing Ceph"
-            init_ceph
-            start_ceph
-        else
-            install_ceph_remote
-        fi
-    elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
-        if is_service_enabled glance; then
-            echo_summary "Configuring Glance for Ceph"
-            configure_ceph_glance
-        fi
-        if is_service_enabled nova; then
-            echo_summary "Configuring Nova for Ceph"
-            configure_ceph_nova
-        fi
-        if is_service_enabled cinder; then
-            echo_summary "Configuring Cinder for Ceph"
-            configure_ceph_cinder
-        fi
-        if is_service_enabled n-cpu; then
-            # NOTE (leseb): the part below is a requirement to attach Ceph block devices
-            echo_summary "Configuring libvirt secret"
-            import_libvirt_secret_ceph
-        fi
-
-        if [ "$REMOTE_CEPH" = "False" ]; then
-            if is_service_enabled glance; then
-                echo_summary "Configuring Glance for Ceph"
-                configure_ceph_embedded_glance
-            fi
-            if is_service_enabled nova; then
-                echo_summary "Configuring Nova for Ceph"
-                configure_ceph_embedded_nova
-            fi
-            if is_service_enabled cinder; then
-                echo_summary "Configuring Cinder for Ceph"
-                configure_ceph_embedded_cinder
-            fi
-        fi
-    fi
-
-    if [[ "$1" == "unstack" ]]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            cleanup_ceph_embedded
-            stop_ceph
-        fi
-        cleanup_ceph_general
-    fi
-
-    if [[ "$1" == "clean" ]]; then
-        if [ "$REMOTE_CEPH" = "True" ]; then
-            cleanup_ceph_remote
-        else
-            cleanup_ceph_embedded
-        fi
-        cleanup_ceph_general
-    fi
-fi
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 6a3d121..15ecfe3 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -11,13 +11,16 @@
         # Tempest config must come after layer 2 services are running
         :
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        # Tempest config must come after all other plugins are run
+        :
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # local.conf Tempest option overrides
+        :
+    elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
         echo_summary "Installing Tempest Plugins"
         install_tempest_plugins
-    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
-        # local.conf Tempest option overrides
-        :
     fi
 
     if [[ "$1" == "unstack" ]]; then
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 8a4b0f0..428544f 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -44,8 +44,8 @@
     WSGIPassAuthorization On
 </Location>
 
-Alias /identity_v2_admin %KEYSTONE_BIN%/keystone-wsgi-admin
-<Location /identity_v2_admin>
+Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin
+<Location /identity_admin>
     SetHandler wsgi-script
     Options +ExecCGI
 
diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template
new file mode 100644
index 0000000..b89ef96
--- /dev/null
+++ b/files/apache-placement-api.template
@@ -0,0 +1,25 @@
+Listen %PUBLICPORT%
+
+<VirtualHost *:%PUBLICPORT%>
+    WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
+    WSGIProcessGroup placement-api
+    WSGIScriptAlias / %PUBLICWSGI%
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%M"
+    </IfVersion>
+    ErrorLog /var/log/%APACHE_NAME%/placement-api.log
+    %SSLENGINE%
+    %SSLCERTFILE%
+    %SSLKEYFILE%
+</VirtualHost>
+
+Alias /placement %PUBLICWSGI%
+<Location /placement>
+    SetHandler wsgi-script
+    Options +ExecCGI
+    WSGIProcessGroup placement-api
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+</Location>
diff --git a/files/debs/cinder b/files/debs/cinder
index 3595e01..c1b79fd 100644
--- a/files/debs/cinder
+++ b/files/debs/cinder
@@ -3,3 +3,4 @@
 open-iscsi-utils # Deprecated since quantal dist:precise
 qemu-utils
 tgt # NOPRIME
+thin-provisioning-tools
diff --git a/files/debs/tls-proxy b/files/debs/tls-proxy
index dce9c07..5bd8e21 100644
--- a/files/debs/tls-proxy
+++ b/files/debs/tls-proxy
@@ -1 +1 @@
-stud
+apache2
diff --git a/files/rpms/general b/files/rpms/general
index d0ceb56..77d2fa5 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -7,9 +7,9 @@
 gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
-iptables-services  # NOPRIME f23,f24
+iptables-services  # NOPRIME f23,f24,f25
 java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f23,f24
+java-1.8.0-openjdk-headless  # NOPRIME f23,f24,f25
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
diff --git a/files/rpms/nova b/files/rpms/nova
index a883ec4..45f1c94 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,7 +7,7 @@
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f23,f24
+kernel-modules # dist:f23,f24,f25
 kpartx
 kvm # NOPRIME
 libvirt-bin # NOPRIME
diff --git a/files/rpms/swift b/files/rpms/swift
index bd249ee..2f12df0 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -2,7 +2,7 @@
 liberasurecode-devel
 memcached
 pyxattr
-rsync-daemon # dist:f23,f24
+rsync-daemon # dist:f23,f24,f25
 sqlite
 xfsprogs
 xinetd
diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf
index c670531..c49f716 100644
--- a/files/swift/rsyncd.conf
+++ b/files/swift/rsyncd.conf
@@ -4,76 +4,76 @@
 pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid
 address = 127.0.0.1
 
-[account6012]
+[account6612]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6012.lock
+lock file = %SWIFT_DATA_DIR%/run/account6612.lock
 
-[account6022]
+[account6622]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6022.lock
+lock file = %SWIFT_DATA_DIR%/run/account6622.lock
 
-[account6032]
+[account6632]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6032.lock
+lock file = %SWIFT_DATA_DIR%/run/account6632.lock
 
-[account6042]
+[account6642]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6042.lock
+lock file = %SWIFT_DATA_DIR%/run/account6642.lock
 
 
-[container6011]
+[container6611]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6011.lock
+lock file = %SWIFT_DATA_DIR%/run/container6611.lock
 
-[container6021]
+[container6621]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6021.lock
+lock file = %SWIFT_DATA_DIR%/run/container6621.lock
 
-[container6031]
+[container6631]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6031.lock
+lock file = %SWIFT_DATA_DIR%/run/container6631.lock
 
-[container6041]
+[container6641]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6041.lock
+lock file = %SWIFT_DATA_DIR%/run/container6641.lock
 
 
-[object6010]
+[object6613]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6010.lock
+lock file = %SWIFT_DATA_DIR%/run/object6613.lock
 
-[object6020]
+[object6623]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6020.lock
+lock file = %SWIFT_DATA_DIR%/run/object6623.lock
 
-[object6030]
+[object6633]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6030.lock
+lock file = %SWIFT_DATA_DIR%/run/object6633.lock
 
-[object6040]
+[object6643]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6040.lock
+lock file = %SWIFT_DATA_DIR%/run/object6643.lock
diff --git a/functions b/functions
index 5856578..6a0ac67 100644
--- a/functions
+++ b/functions
@@ -646,6 +646,24 @@
 }
 
 
+# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling
+function enable_kernel_bridge_firewall {
+    # Load bridge module. This module provides access to firewall for bridged
+    # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to
+    # enable/disable bridge firewalling
+    sudo modprobe bridge
+    # For newer kernels (3.18+), those sysctl settings are split into a separate
+    # kernel module (br_netfilter). Load it too, if present.
+    sudo modprobe br_netfilter 2>> /dev/null || :
+    # Enable bridge firewalling in case it's disabled in kernel (upstream
+    # default is enabled, but some distributions may decide to change it).
+    # This is at least needed for RHEL 7.2 and earlier releases.
+    for proto in arp ip ip6; do
+        sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1
+    done
+}
+
+
 # Restore xtrace
 $_XTRACE_FUNCTIONS
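
A quick way to sanity-check the new ``enable_kernel_bridge_firewall`` helper
after sourcing ``functions`` (a sketch; the sysctl keys are the ones set in
the loop above)::

    enable_kernel_bridge_firewall
    # all three should now report 1
    sudo sysctl net.bridge.bridge-nf-call-arptables \
        net.bridge.bridge-nf-call-iptables \
        net.bridge.bridge-nf-call-ip6tables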
 
diff --git a/functions-common b/functions-common
index 3fdd71b..cc1d42b 100644
--- a/functions-common
+++ b/functions-common
@@ -534,10 +534,8 @@
                 echo "the project to the \$PROJECTS variable in the job definition."
                 die $LINENO "Cloning not allowed in this configuration"
             fi
-            git_timed clone $git_clone_flags $git_remote $git_dest
-            cd $git_dest
-            # This checkout syntax works for both branches and tags
-            git checkout $git_ref
+            # '--branch' can also take tags
+            git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
         elif [[ "$RECLONE" = "True" ]]; then
             # if it does exist then simulate what clone does if asked to RECLONE
             cd $git_dest
@@ -865,11 +863,9 @@
     domain_args=$(_get_domain_args $4 $5)
 
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
-        --column "ID" \
         --project $3 \
-        --column "Name" \
         $domain_args \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
@@ -878,11 +874,9 @@
             --user $2 \
             --project $3 \
             $domain_args
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
-            --column "ID" \
             --project $3 \
-            --column "Name" \
             $domain_args \
             | grep " $1 " | get_field 1)
     fi
@@ -894,22 +888,18 @@
 function get_or_add_user_domain_role {
     local user_role_id
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
-        --column "ID" \
         --domain $3 \
-        --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
         # Adds role to user and get it
         openstack role add $1 \
             --user $2 \
             --domain $3
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
-            --column "ID" \
             --domain $3 \
-            --column "Name" \
             | grep " $1 " | get_field 1)
     fi
     echo $user_role_id
@@ -920,13 +910,11 @@
 function get_or_add_user_domain_role {
     local user_role_id
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
         --os-url=$KEYSTONE_SERVICE_URI_V3 \
         --os-identity-api-version=3 \
-        --column "ID" \
         --domain $3 \
-        --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
         # Adds role to user and get it
@@ -935,13 +923,11 @@
             --domain $3 \
             --os-url=$KEYSTONE_SERVICE_URI_V3 \
             --os-identity-api-version=3
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
             --os-url=$KEYSTONE_SERVICE_URI_V3 \
             --os-identity-api-version=3 \
-            --column "ID" \
             --domain $3 \
-            --column "Name" \
             | grep " $1 " | get_field 1)
     fi
     echo $user_role_id
@@ -952,19 +938,19 @@
 function get_or_add_group_project_role {
     local group_role_id
     # Gets group role id
-    group_role_id=$(openstack role list \
+    group_role_id=$(openstack role assignment list \
         --group $2 \
         --project $3 \
-        -c "ID" -f value)
+        -f value)
     if [[ -z "$group_role_id" ]]; then
         # Adds role to group and get it
         openstack role add $1 \
             --group $2 \
             --project $3
-        group_role_id=$(openstack role list \
+        group_role_id=$(openstack role assignment list \
             --group $2 \
             --project $3 \
-            -c "ID" -f value)
+            -f value)
     fi
     echo $group_role_id
 }
@@ -1330,7 +1316,7 @@
     elif is_fedora; then
         sudo ${YUM:-yum} remove -y "$@" ||:
     elif is_suse; then
-        sudo zypper rm "$@" ||:
+        sudo zypper remove -y "$@" ||:
     else
         exit_distro_not_supported "uninstalling packages"
     fi
@@ -1346,20 +1332,26 @@
 
     time_start "yum_install"
 
-    # - We run with LC_ALL=C so string matching *should* be OK
-    # - Exit 1 if the failure might get better with a retry.
-    # - Exit 2 if it is fatal.
-    parse_yum_result='             \
-        BEGIN { result=0 }         \
-        /^YUM_FAILED/ { exit $2 }  \
-        /^No package/ { result=2 } \
-        /^Failed:/    { result=2 } \
-        //{ print }                \
+    # This is a bit tricky, because yum -y assumes missing or failed
+    # packages are OK (see [1]).  We want devstack to stop if we are
+    # installing missing packages.
+    #
+    # Thus we manually match on the output (stack.sh runs in a fixed
+    # locale, so lang shouldn't change).
+    #
+    # If yum returns !0, we echo the result as "YUM_FAILED" and return
+    # that from the awk (we're subverting -e with this trick).
+    # Otherwise we use awk to look for failure strings and return "2"
+    # to indicate a terminal failure.
+    #
+    # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
+    parse_yum_result='              \
+        BEGIN { result=0 }          \
+        /^YUM_FAILED/ { result=$2 } \
+        /^No package/ { result=2 }  \
+        /^Failed:/    { result=2 }  \
+        //{ print }                 \
         END { exit result }'
-
-    # The manual check for missing packages is because yum -y assumes
-    # missing or failed packages are OK.
-    # See https://bugzilla.redhat.com/show_bug.cgi?id=965567
     (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
         | awk "$parse_yum_result" && result=$? || result=$?
 
@@ -1686,7 +1678,7 @@
     local logfile=$2
 
     if [[ "$USE_SCREEN" = "True" ]]; then
-        screen_process "$name" "sudo tail -f $logfile"
+        screen_process "$name" "sudo tail -f $logfile | sed 's/\\\\\\\\x1b/\o033/g'"
     fi
 }
 
@@ -1779,6 +1771,9 @@
     local name=$1
     local url=$2
     local branch=${3:-master}
+    if [[ ",${DEVSTACK_PLUGINS}," =~ ,${name}, ]]; then
+        die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}"
+    fi
     DEVSTACK_PLUGINS+=",$name"
     GITREPO[$name]=$url
     GITDIR[$name]=$DEST/$name
@@ -1880,7 +1875,7 @@
             # white listed elements in tree. We want these to move out
             # over time as well, but they are in tree, so we need to
             # manage that.
-            local exceptions="60-ceph.sh 80-tempest.sh"
+            local exceptions="80-tempest.sh"
             local extra
             extra=$(basename $extra_plugin_file_name)
             if [[ ! ( $exceptions =~ "$extra" ) ]]; then
@@ -2207,6 +2202,18 @@
     echo ${1-0}.${2-0}.${3-0}.${4-0}
 }
 
+# Check if this is a valid ipv4 address string
+function is_ipv4_address {
+    local address=$1
+    local regex='([0-9]{1,3}.){3}[0-9]{1,3}'
+    # TODO(clarkb) make this more robust
+    if [[ "$address" =~ $regex ]] ; then
+        return 0
+    else
+        return 1
+    fi
+}
+
 # Gracefully cp only if source file/dir exists
 # cp_it source destination
 function cp_it {
@@ -2254,6 +2261,14 @@
     echo $subnet
 }
 
+function is_provider_network {
+    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
+        return 0
+    fi
+    return 1
+}
+
+
 # Return the current python as "python<major>.<minor>"
 function python_version {
     local python_version
@@ -2304,6 +2319,17 @@
     fi
 }
 
+# Service wrapper to reload services
+# If the service was not in running state it will start it
+# reload_service service-name
+function reload_service {
+    if [ -x /bin/systemctl ]; then
+        sudo /bin/systemctl reload-or-restart $1
+    else
+        sudo service $1 reload
+    fi
+}
+
 # Test with a finite retry loop.
 #
 function test_with_retry {
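
The regex in ``is_ipv4_address`` above is intentionally loose (unanchored,
with unescaped dots), as its TODO notes; if it were tightened later, a
stricter variant might look like this sketch (illustrative only, not part of
this change)::

    function is_ipv4_address_strict {
        local address=$1
        # each octet must be 0-255
        local octet='(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})'
        local regex="^${octet}(\.${octet}){3}$"
        [[ "$address" =~ $regex ]]
    }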
diff --git a/inc/meta-config b/inc/meta-config
index 6eb7a00..6252135 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -40,12 +40,10 @@
     $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile '
         BEGIN { group = "" }
         /^\[\[.+\|.*\]\]/ {
-            if (group == "") {
-                gsub("[][]", "", $1);
-                split($1, a, "|");
-                if (a[1] == matchgroup && a[2] == configfile) {
-                    group=a[1]
-                }
+            gsub("[][]", "", $1);
+            split($1, a, "|");
+            if (a[1] == matchgroup && a[2] == configfile) {
+                group=a[1]
             } else {
                 group=""
             }
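
One effect of the widened match above is that consecutive sections addressed
to the same target file are all collected instead of only the first; for
example, both groups in this illustrative ``local.conf`` fragment now land in
the target file::

    [[post-config|$NOVA_CONF]]
    [DEFAULT]
    debug = True

    [[post-config|$NOVA_CONF]]
    [libvirt]
    virt_type = kvm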
diff --git a/lib/apache b/lib/apache
index 2c84c7a..2dc626f 100644
--- a/lib/apache
+++ b/lib/apache
@@ -29,37 +29,59 @@
 
 
 # Set up apache name and configuration directory
+# Note that APACHE_CONF_DIR is really more accurately apache's vhost
+# configuration dir, but we can't just change this because it is a public interface.
 if is_ubuntu; then
     APACHE_NAME=apache2
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/sites-available}
+    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf-enabled}
 elif is_fedora; then
     APACHE_NAME=httpd
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d}
+    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d}
 elif is_suse; then
     APACHE_NAME=apache2
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d}
+    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d}
 fi
+APACHE_LOG_DIR="/var/log/${APACHE_NAME}"
 
 # Functions
 # ---------
+
+# Enable apache mod and restart apache if it isn't already enabled.
+function enable_apache_mod {
+    local mod=$1
+    # a2enmod/a2query are only available on Debian- and SUSE-based platforms
+    if is_ubuntu || is_suse ; then
+        if ! a2query -m $mod ; then
+            sudo a2enmod $mod
+            restart_apache_server
+        fi
+    elif is_fedora; then
+        # pass
+        true
+    else
+        exit_distro_not_supported "apache enable mod"
+    fi
+}
+
 # install_apache_wsgi() - Install Apache server and wsgi module
 function install_apache_wsgi {
     # Apache installation, because we mark it NOPRIME
     if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
         install_package apache2 libapache2-mod-wsgi
-        # WSGI isn't enabled by default, enable it
-        sudo a2enmod wsgi
     elif is_fedora; then
         sudo rm -f /etc/httpd/conf.d/000-*
         install_package httpd mod_wsgi
     elif is_suse; then
         install_package apache2 apache2-mod_wsgi
-        # WSGI isn't enabled by default, enable it
-        sudo a2enmod wsgi
     else
-        exit_distro_not_supported "apache installation"
+        exit_distro_not_supported "apache wsgi installation"
     fi
+    # WSGI isn't enabled by default, enable it
+    enable_apache_mod wsgi
 
     # ensure mod_version enabled for <IfVersion ...>.  This is
     # built-in statically on anything recent, but precise (2.2)
@@ -192,6 +214,11 @@
     time_stop "restart_apache_server"
 }
 
+# reload_apache_server
+function reload_apache_server {
+    reload_service $APACHE_NAME
+}
+
 # Restore xtrace
 $_XTRACE_LIB_APACHE
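With enable_apache_mod, Apache is only restarted when a module actually had to be enabled, and reload_apache_server builds on the new reload_service wrapper for cheaper vhost updates. A rough sketch of the intended call pattern, assuming the existing enable_apache_site helper and a hypothetical vhost named "example":

    enable_apache_mod wsgi        # no-op restart-wise if wsgi is already enabled
    enable_apache_site example    # activate the vhost configuration
    reload_apache_server          # pick up the change without a full restart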
 
diff --git a/lib/ceph b/lib/ceph
deleted file mode 100644
index 1e55c48..0000000
--- a/lib/ceph
+++ /dev/null
@@ -1,381 +0,0 @@
-#!/bin/bash
-#
-# lib/ceph
-# Functions to control the configuration and operation of the **Ceph** storage service
-
-# Dependencies:
-#
-# - ``functions`` file
-# - ``CEPH_DATA_DIR`` or ``DATA_DIR`` must be defined
-
-# ``stack.sh`` calls the entry points in this order (via ``extras.d/60-ceph.sh``):
-#
-# - install_ceph
-# - configure_ceph
-# - init_ceph
-# - start_ceph
-# - stop_ceph
-# - cleanup_ceph
-
-# Save trace setting
-_XTRACE_LIB_CEPH=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# Set ``CEPH_DATA_DIR`` to the location of Ceph drives and objects.
-# Default is the common DevStack data directory.
-CEPH_DATA_DIR=${CEPH_DATA_DIR:-/var/lib/ceph}
-CEPH_DISK_IMAGE=${CEPH_DATA_DIR}/drives/images/ceph.img
-
-# Set ``CEPH_CONF_DIR`` to the location of the configuration files.
-# Default is ``/etc/ceph``.
-CEPH_CONF_DIR=${CEPH_CONF_DIR:-/etc/ceph}
-
-# DevStack will create a loop-back disk formatted as XFS to store the
-# Ceph data. Set ``CEPH_LOOPBACK_DISK_SIZE`` to the disk size in
-# kilobytes.
-# Default is 1 gigabyte.
-CEPH_LOOPBACK_DISK_SIZE_DEFAULT=4G
-CEPH_LOOPBACK_DISK_SIZE=${CEPH_LOOPBACK_DISK_SIZE:-$CEPH_LOOPBACK_DISK_SIZE_DEFAULT}
-
-# Common
-CEPH_FSID=$(uuidgen)
-CEPH_CONF_FILE=${CEPH_CONF_DIR}/ceph.conf
-
-# Glance
-GLANCE_CEPH_USER=${GLANCE_CEPH_USER:-glance}
-GLANCE_CEPH_POOL=${GLANCE_CEPH_POOL:-images}
-GLANCE_CEPH_POOL_PG=${GLANCE_CEPH_POOL_PG:-8}
-GLANCE_CEPH_POOL_PGP=${GLANCE_CEPH_POOL_PGP:-8}
-
-# Nova
-NOVA_CEPH_POOL=${NOVA_CEPH_POOL:-vms}
-NOVA_CEPH_POOL_PG=${NOVA_CEPH_POOL_PG:-8}
-NOVA_CEPH_POOL_PGP=${NOVA_CEPH_POOL_PGP:-8}
-
-# Cinder
-CINDER_CEPH_POOL=${CINDER_CEPH_POOL:-volumes}
-CINDER_CEPH_POOL_PG=${CINDER_CEPH_POOL_PG:-8}
-CINDER_CEPH_POOL_PGP=${CINDER_CEPH_POOL_PGP:-8}
-CINDER_CEPH_USER=${CINDER_CEPH_USER:-cinder}
-CINDER_CEPH_UUID=${CINDER_CEPH_UUID:-$(uuidgen)}
-
-# Set ``CEPH_REPLICAS`` to configure how many replicas are to be
-# configured for your Ceph cluster. By default we are configuring
-# only one replica since this is way less CPU and memory intensive. If
-# you are planning to test Ceph replication feel free to increase this value
-CEPH_REPLICAS=${CEPH_REPLICAS:-1}
-CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS})
-
-# Connect to an existing Ceph cluster
-REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH)
-REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring}
-
-# Cinder encrypted volume tests are not supported with a Ceph backend due to
-# bug 1463525.
-ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False
-
-
-# Functions
-# ------------
-
-function get_ceph_version {
-    local ceph_version_str
-    ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.')
-    echo $ceph_version_str
-}
-
-# import_libvirt_secret_ceph() - Imports Cinder user key into libvirt
-# so it can connect to the Ceph cluster while attaching a Cinder block device
-function import_libvirt_secret_ceph {
-    cat > secret.xml <<EOF
-<secret ephemeral='no' private='no'>
-   <uuid>${CINDER_CEPH_UUID}</uuid>
-   <usage type='ceph'>
-     <name>client.${CINDER_CEPH_USER} secret</name>
-   </usage>
-</secret>
-EOF
-    sudo virsh secret-define --file secret.xml
-    sudo virsh secret-set-value --secret ${CINDER_CEPH_UUID} --base64 $(sudo ceph -c ${CEPH_CONF_FILE} auth get-key client.${CINDER_CEPH_USER})
-    sudo rm -f secret.xml
-}
-
-# undefine_virsh_secret() - Undefine Cinder key secret from libvirt
-function undefine_virsh_secret {
-    if is_service_enabled cinder || is_service_enabled nova; then
-        local virsh_uuid
-        virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }')
-        sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1
-    fi
-}
-
-
-# check_os_support_ceph() - Check if the operating system provides a decent version of Ceph
-function check_os_support_ceph {
-    if [[ ! ${DISTRO} =~ (trusty|f23|f24) ]]; then
-        echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)"
-        if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then
-            die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes"
-        fi
-        NO_UPDATE_REPOS=False
-    fi
-}
-
-# cleanup_ceph() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_ceph_remote {
-    # do a proper cleanup from here to avoid leftover on the remote Ceph cluster
-    if is_service_enabled glance; then
-        sudo ceph osd pool delete $GLANCE_CEPH_POOL $GLANCE_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$GLANCE_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled cinder; then
-        sudo ceph osd pool delete $CINDER_CEPH_POOL $CINDER_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$CINDER_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled c-bak; then
-        sudo ceph osd pool delete $CINDER_BAK_CEPH_POOL $CINDER_BAK_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-        sudo ceph auth del client.$CINDER_BAK_CEPH_USER > /dev/null 2>&1
-    fi
-    if is_service_enabled nova; then
-        iniset $NOVA_CONF libvirt rbd_secret_uuid ""
-        sudo ceph osd pool delete $NOVA_CEPH_POOL $NOVA_CEPH_POOL --yes-i-really-really-mean-it > /dev/null 2>&1
-    fi
-}
-
-function cleanup_ceph_embedded {
-    sudo killall -w -9 ceph-mon
-    sudo killall -w -9 ceph-osd
-    sudo rm -rf ${CEPH_DATA_DIR}/*/*
-    if egrep -q ${CEPH_DATA_DIR} /proc/mounts; then
-        sudo umount ${CEPH_DATA_DIR}
-    fi
-    if [[ -e ${CEPH_DISK_IMAGE} ]]; then
-        sudo rm -f ${CEPH_DISK_IMAGE}
-    fi
-
-    # purge ceph config file and keys
-    sudo rm -rf ${CEPH_CONF_DIR}/*
-}
-
-function cleanup_ceph_general {
-    undefine_virsh_secret
-}
-
-
-# configure_ceph() - Set config files, create data dirs, etc
-function configure_ceph {
-    local count=0
-
-    # create a backing file disk
-    create_disk ${CEPH_DISK_IMAGE} ${CEPH_DATA_DIR} ${CEPH_LOOPBACK_DISK_SIZE}
-
-    # populate ceph directory
-    sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp}
-
-    # create ceph monitor initial key and directory
-    sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \
-        --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \
-        --cap mon 'allow *'
-    sudo mkdir /var/lib/ceph/mon/ceph-$(hostname)
-
-    # create a default ceph configuration file
-    sudo tee ${CEPH_CONF_FILE} > /dev/null <<EOF
-[global]
-fsid = ${CEPH_FSID}
-mon_initial_members = $(hostname)
-mon_host = ${SERVICE_HOST}
-auth_cluster_required = cephx
-auth_service_required = cephx
-auth_client_required = cephx
-filestore_xattr_use_omap = true
-osd crush chooseleaf type = 0
-osd journal size = 100
-EOF
-
-    # bootstrap the ceph monitor
-    sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \
-        --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname)
-
-    if is_ubuntu; then
-        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart
-        sudo initctl emit ceph-mon id=$(hostname)
-    else
-        sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit
-        sudo service ceph start mon.$(hostname)
-    fi
-
-    # wait for the admin key to come up otherwise we will not be able to do the actions below
-    until [ -f ${CEPH_CONF_DIR}/ceph.client.admin.keyring ]; do
-        echo_summary "Waiting for the Ceph admin key to be ready..."
-
-        count=$(($count + 1))
-        if [ $count -eq 3 ]; then
-            die $LINENO "Maximum of 3 retries reached"
-        fi
-        sleep 5
-    done
-
-    # pools data and metadata were removed in the Giant release so depending on the version we apply different commands
-    local ceph_version
-    ceph_version=$(get_ceph_version)
-    # change pool replica size according to the CEPH_REPLICAS set by the user
-    if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data size ${CEPH_REPLICAS}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata size ${CEPH_REPLICAS}
-    else
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS}
-    fi
-
-    # create a simple rule to take OSDs instead of host with CRUSH
-    # then apply this rules to the default pool
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd crush rule create-simple devstack default osd
-        RULE_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd crush rule dump devstack | awk '/rule_id/ {print $3}' | cut -d ',' -f1)
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd crush_ruleset ${RULE_ID}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set data crush_ruleset ${RULE_ID}
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set metadata crush_ruleset ${RULE_ID}
-    fi
-
-    # create the OSD(s)
-    for rep in ${CEPH_REPLICAS_SEQ}; do
-        OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create)
-        sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}
-        sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \
-            mon 'allow profile osd ' osd 'allow *' | \
-            sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring
-
-        # ceph's init script is parsing ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ and looking for a file
-        # 'upstart' or 'sysinitv', thanks to these 'touches' we are able to control OSDs daemons
-        # from the init script.
-        if is_ubuntu; then
-            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/upstart
-        else
-            sudo touch ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/sysvinit
-        fi
-    done
-}
-
-function configure_ceph_embedded_glance {
-    # configure Glance service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${GLANCE_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_glance() - Glance config needs to come after Glance is set up
-function configure_ceph_glance {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \
-        mon "allow r" \
-        osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \
-        sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring
-
-    iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True
-    iniset $GLANCE_API_CONF glance_store default_store rbd
-    iniset $GLANCE_API_CONF glance_store stores "file, http, rbd"
-    iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE
-    iniset $GLANCE_API_CONF glance_store rbd_store_user $GLANCE_CEPH_USER
-    iniset $GLANCE_API_CONF glance_store rbd_store_pool $GLANCE_CEPH_POOL
-}
-
-function configure_ceph_embedded_nova {
-    # configure Nova service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_nova() - Nova config needs to come after Nova is set up
-function configure_ceph_nova {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${NOVA_CEPH_POOL} ${NOVA_CEPH_POOL_PG} ${NOVA_CEPH_POOL_PGP}
-    iniset $NOVA_CONF libvirt rbd_user ${CINDER_CEPH_USER}
-    iniset $NOVA_CONF libvirt rbd_secret_uuid ${CINDER_CEPH_UUID}
-    iniset $NOVA_CONF libvirt inject_key false
-    iniset $NOVA_CONF libvirt disk_cachemodes "network=writeback"
-    iniset $NOVA_CONF libvirt images_type rbd
-    iniset $NOVA_CONF libvirt images_rbd_pool ${NOVA_CEPH_POOL}
-    iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE}
-
-    if ! is_service_enabled cinder; then
-        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
-            mon "allow r" \
-            osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
-            sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null
-        sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-    fi
-}
-
-function configure_ceph_embedded_cinder {
-    # Configure Cinder service options, ceph pool, ceph user and ceph key
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} size ${CEPH_REPLICAS}
-    if [[ $CEPH_REPLICAS -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_CEPH_POOL} crush_ruleset ${RULE_ID}
-    fi
-}
-
-# configure_ceph_cinder() - Cinder config needs to come after Cinder is set up
-function configure_ceph_cinder {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP}
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \
-        mon "allow r" \
-        osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \
-        sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-    sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring
-}
-
-# init_ceph() - Initialize databases, etc.
-function init_ceph {
-    # clean up from previous (possibly aborted) runs
-    # make sure to kill all ceph processes first
-    sudo pkill -f ceph-mon || true
-    sudo pkill -f ceph-osd || true
-}
-
-# install_ceph() - Collect source and prepare
-function install_ceph_remote {
-    install_package ceph-common
-}
-
-function install_ceph {
-    install_package ceph
-}
-
-# start_ceph() - Start running processes, including screen
-function start_ceph {
-    if is_ubuntu; then
-        sudo initctl emit ceph-mon id=$(hostname)
-        for id in $(sudo ceph -c ${CEPH_CONF_FILE} osd ls); do
-            sudo start ceph-osd id=${id}
-        done
-    else
-        sudo service ceph start
-    fi
-}
-
-# stop_ceph() - Stop running processes (non-screen)
-function stop_ceph {
-    if is_ubuntu; then
-        sudo service ceph-mon-all stop > /dev/null 2>&1
-        sudo service ceph-osd-all stop > /dev/null 2>&1
-    else
-        sudo service ceph stop > /dev/null 2>&1
-    fi
-}
-
-
-# Restore xtrace
-$_XTRACE_LIB_CEPH
-
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/cinder b/lib/cinder
index a87f395..ad434d6 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -70,6 +70,9 @@
 # What type of LVM device should Cinder use for LVM backend
 # Defaults to default, which is thick, the other valid choice
 # is thin, which as the name implies utilizes lvm thin provisioning.
+# Thinly provisioned LVM volumes may be more efficient when using the Cinder
+# image cache, but there are also known race failures with volume snapshots
+# and thinly provisioned LVM volumes, see bug 1642111 for details.
 CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default}
 
 # Default backends
@@ -128,6 +131,17 @@
 CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL}
 CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL}
 
+# Environment variables to configure the image-volume cache
+CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
+
+# For the limits, if left unset, the cinder default of 0 (unlimited) is used
+CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-}
+CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-}
+
+# Configure which cinder backends will have the image-volume cache. This takes the
+# same type:name form as the CINDER_ENABLED_BACKENDS config option. By default the
+# cache is enabled for all cinder backends.
+CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
 
 # Functions
 # ---------
@@ -292,6 +306,7 @@
         if [[ -n "$default_name" ]]; then
             iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
         fi
+        configure_cinder_image_volume_cache
     fi
 
     if is_service_enabled swift; then
@@ -305,8 +320,8 @@
     if is_service_enabled tls-proxy; then
         # Set the service port for a proxy to take the original
         iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT
-
         iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
+        iniset $CINDER_CONF DEFAULT osapi_volume_base_URL $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT
     fi
 
     if [ "$SYSLOG" != "False" ]; then
@@ -358,6 +373,13 @@
     iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD"
     iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_PROJECT_NAME"
     iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
+
+    # Set the backend url according to the configured dlm backend
+    if is_dlm_enabled; then
+        if [[ "$(dlm_backend)" == "zookeeper" ]]; then
+            iniset $CINDER_CONF coordination backend_url "zake://"
+        fi
+    fi
 }
 
 # create_cinder_accounts() - Set up common required cinder accounts
@@ -397,6 +419,8 @@
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
+
+        configure_cinder_internal_tenant
     fi
 }
 
@@ -408,11 +432,7 @@
 }
 
 # init_cinder() - Initialize database and volume group
-# Uses global ``NOVA_ENABLED_APIS``
 function init_cinder {
-    # Force nova volumes off
-    NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
-
     if is_service_enabled $DATABASE_BACKENDS; then
         # (Re)create cinder database
         recreate_database cinder
@@ -534,7 +554,7 @@
 
     # Start proxies if enabled
     if is_service_enabled c-api && is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT &
+        start_tls_proxy cinder '*' $CINDER_SERVICE_PORT $CINDER_SERVICE_HOST $CINDER_SERVICE_PORT_INT
     fi
 }
 
@@ -574,6 +594,31 @@
     :
 }
 
+function configure_cinder_internal_tenant {
+    # Re-use the Cinder service account for simplicity.
+    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME)
+    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder")
+}
+
+function configure_cinder_image_volume_cache {
+    # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends
+    # similar to CINDER_ENABLED_BACKENDS, i.e. type:name entries where the name
+    # is the backend specific configuration stanza in cinder.conf.
+    for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do
+        local be_name=${be##*:}
+
+        iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED
+
+        if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then
+            iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB
+        fi
+
+        if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then
+            iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT
+        fi
+    done
+}
+
 
 # Restore xtrace
 $_XTRACE_CINDER
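Because CINDER_CACHE_ENABLED_FOR_BACKENDS defaults to CINDER_ENABLED_BACKENDS, the image-volume cache is enabled for every backend unless narrowed in local.conf. A hedged example, assuming the stock lvm:lvmdriver-1 backend plus a second, hypothetical NFS backend:

    [[local|localrc]]
    CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,nfs:nfsdriver-1
    # cache image-volumes only on the LVM backend, capped at 10 GB and 5 entries
    CINDER_CACHE_ENABLED_FOR_BACKENDS=lvm:lvmdriver-1
    CINDER_IMG_CACHE_SIZE_GB=10
    CINDER_IMG_CACHE_SIZE_COUNT=5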
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 9bff5be..ba86ccf 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -45,7 +45,7 @@
 
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
-    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
     iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
     iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
     iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
@@ -66,7 +66,7 @@
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
         iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
-        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
         iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
         iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
         iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
diff --git a/lib/glance b/lib/glance
index 8d95aad..5259174 100644
--- a/lib/glance
+++ b/lib/glance
@@ -187,8 +187,6 @@
 
         iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
         iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
         iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
 
         # commenting is not strictly necessary but it's confusing to have bad values in conf
@@ -312,6 +310,11 @@
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
+
+        # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999
+        service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME)
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id
     fi
 
     # Add glance-glare service and endpoints
@@ -383,8 +386,8 @@
 function start_glance {
     local service_protocol=$GLANCE_SERVICE_PROTOCOL
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT &
-        start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT &
+        start_tls_proxy glance-service '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT
+        start_tls_proxy glance-registry '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT
     fi
 
     run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf"
diff --git a/lib/heat b/lib/heat
index c841e0a..0863128 100644
--- a/lib/heat
+++ b/lib/heat
@@ -40,7 +40,6 @@
 HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
 HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
 OCC_DIR=$DEST/os-collect-config
-DIB_UTILS_DIR=$DEST/dib-utils
 ORC_DIR=$DEST/os-refresh-config
 OAC_DIR=$DEST/os-apply-config
 
@@ -276,7 +275,6 @@
     git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
     git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
     git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
-    git_clone $DIB_UTILS_REPO $DIB_UTILS_DIR $DIB_UTILS_BRANCH
 }
 
 # start_heat() - Start running processes, including screen
@@ -420,7 +418,7 @@
 
 # build_heat_pip_mirror() - Build a pip mirror containing heat agent projects
 function build_heat_pip_mirror {
-    local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR $DIB_UTILS_DIR"
+    local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR"
     local projpath proj package
 
     rm -rf $HEAT_PIP_REPO
diff --git a/lib/horizon b/lib/horizon
index 78cbe8b..c0faed7 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -81,7 +81,7 @@
     # Horizon is installed as develop mode, so we can compile here.
     # Message catalog compilation is handled by Django admin script,
     # so compiling them after the installation avoids Django installation twice.
-    (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages)
+    (cd $HORIZON_DIR; python manage.py compilemessages)
 
     # ``local_settings.py`` is used to override horizon default settings.
     local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
@@ -97,6 +97,11 @@
     _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
 
+    # note(trebskit): if HOST_IP points at a non-localhost ip address, horizon cannot be
+    # accessed from outside the virtual machine. This fix is meant primarily for local
+    # development purposes.
+    _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"]
+
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
     fi
diff --git a/lib/keystone b/lib/keystone
index 6198e43..948d5b4 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -25,7 +25,6 @@
 # - create_keystone_accounts
 # - stop_keystone
 # - cleanup_keystone
-# - _cleanup_keystone_apache_wsgi
 
 # Save trace setting
 _XTRACE_KEYSTONE=$(set +o | grep xtrace)
@@ -52,9 +51,6 @@
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 
-# NOTE(sdague): remove in Newton
-KEYSTONE_CATALOG_BACKEND="sql"
-
 # Toggle for deploying Keystone under HTTPD + mod_wsgi
 # Deprecated in Mitaka, use KEYSTONE_DEPLOY instead.
 KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
@@ -89,7 +85,7 @@
 
 # Select Keystone's token provider (and format)
 # Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
-KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-}
+KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
 # Set Keystone interface configuration
@@ -124,7 +120,7 @@
 # complete URIs
 if [ "$KEYSTONE_DEPLOY" == "mod_wsgi" ]; then
     # If running in Apache, use path access rather than port.
-    KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_v2_admin
+    KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}/identity_admin
     KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}/identity
 else
     KEYSTONE_AUTH_URI=${KEYSTONE_AUTH_PROTOCOL}://${KEYSTONE_AUTH_HOST}:${KEYSTONE_AUTH_PORT}
@@ -149,11 +145,7 @@
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_keystone {
-    _cleanup_keystone_apache_wsgi
-}
-
-# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_keystone_apache_wsgi {
+    disable_apache_site keystone
     sudo rm -f $(apache_site_config_for keystone)
 }
 
@@ -226,13 +218,6 @@
         iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
         iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
         iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
-        iniset $KEYSTONE_CONF ldap use_dumb_member "True"
-        iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
-        iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
-        iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
-        iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
-        iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
-        iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
         iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
         iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
         iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
@@ -245,12 +230,9 @@
 
     # Enable caching
     iniset $KEYSTONE_CONF cache enabled "True"
-    iniset $KEYSTONE_CONF cache backend "oslo_cache.memcache_pool"
+    iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
     iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
 
-    # Do not cache the catalog backend due to https://bugs.launchpad.net/keystone/+bug/1537617
-    iniset $KEYSTONE_CONF catalog caching "False"
-
     iniset_rpc_backend keystone $KEYSTONE_CONF
 
     # Register SSL certificates if provided
@@ -338,6 +320,8 @@
             iniset "$file" uwsgi buffer-size 65535
             # Make sure the client doesn't try to re-use the connection.
             iniset "$file" uwsgi add-header "Connection: close"
+            # This ensures that file descriptors aren't shared between processes.
+            iniset "$file" uwsgi lazy-apps true
         done
     fi
 
@@ -345,11 +329,13 @@
 
     iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/"
 
+    iniset $KEYSTONE_CONF credential key_repository "$KEYSTONE_CONF_DIR/credential-keys/"
+
     # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project.
     # The users from this project are globally admin as before, but it also
     # allows policy changes in order to clarify the adminess scope.
-    iniset $KEYSTONE_CONF resource admin_project_domain_name Default
-    iniset $KEYSTONE_CONF resource admin_project_name admin
+    #iniset $KEYSTONE_CONF resource admin_project_domain_name Default
+    #iniset $KEYSTONE_CONF resource admin_project_name admin
 }
 
 # create_keystone_accounts() - Sets up common required keystone accounts
@@ -514,6 +500,9 @@
         rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/"
         $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF fernet_setup
     fi
+    rm -rf "$KEYSTONE_CONF_DIR/credential-keys/"
+    $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF credential_setup
+
 }
 
 # install_keystoneauth() - Collect source and prepare
@@ -609,8 +598,8 @@
 
     # Start proxies if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT &
-        start_tls_proxy '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT &
+        start_tls_proxy keystone-service '*' $KEYSTONE_SERVICE_PORT $KEYSTONE_SERVICE_HOST $KEYSTONE_SERVICE_PORT_INT
+        start_tls_proxy keystone-auth '*' $KEYSTONE_AUTH_PORT $KEYSTONE_AUTH_HOST $KEYSTONE_AUTH_PORT_INT
     fi
 
     # (re)start memcached to make sure we have a clean memcache.
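With fernet now the default token provider and keystone-manage credential_setup run on every stack, deployments that still want the previous behaviour only need to override the provider in local.conf; the accepted values ('uuid', 'pki', 'pkiz', 'fernet') are the ones listed in the comment above. For example:

    [[local|localrc]]
    # revert to the previous default token provider
    KEYSTONE_TOKEN_FORMAT=uuid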
diff --git a/lib/lvm b/lib/lvm
index b9d7c39..99c7ba9 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -23,11 +23,7 @@
 # Defaults
 # --------
 # Name of the lvm volume groups to use/create for iscsi volumes
-# This monkey-motion is for compatibility with icehouse-generation Grenade
-# If ``VOLUME_GROUP`` is set, use it, otherwise we'll build a VG name based
-# on ``VOLUME_GROUP_NAME`` that includes the backend name
-# Grenade doesn't use ``VOLUME_GROUP2`` so it is left out
-VOLUME_GROUP_NAME=${VOLUME_GROUP:-${VOLUME_GROUP_NAME:-stack-volumes}}
+VOLUME_GROUP_NAME=${VOLUME_GROUP_NAME:-stack-volumes}
 DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default
 
 # Backing file name is of the form $VOLUME_GROUP$BACKING_FILE_SUFFIX
@@ -58,7 +54,9 @@
     if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then
         local vg_dev
         vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}')
-        sudo losetup -d $vg_dev
+        if [[ -n "$vg_dev" ]]; then
+            sudo losetup -d $vg_dev
+        fi
         rm -f $backing_file
     fi
 }
diff --git a/lib/neutron b/lib/neutron
index c1552e3..d30e185 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -47,10 +47,10 @@
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # By default, use the ML2 plugin
-NEUTRON_PLUGIN=${NEUTRON_PLUGIN:-ml2}
-NEUTRON_PLUGIN_CONF_FILENAME=${NEUTRON_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
-NEUTRON_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_PLUGIN
-NEUTRON_PLUGIN_CONF=$NEUTRON_PLUGIN_CONF_PATH/$NEUTRON_PLUGIN_CONF_FILENAME
+NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
+NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
+NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN
+NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME
 
 NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent}
 NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent}
@@ -117,16 +117,16 @@
 
     configure_neutron_rootwrap
 
-    mkdir -p $NEUTRON_PLUGIN_CONF_PATH
+    mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH
 
-    cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_PLUGIN/$NEUTRON_PLUGIN_CONF_FILENAME.sample $NEUTRON_PLUGIN_CONF
+    cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF
 
     iniset $NEUTRON_CONF database connection `database_connection_url neutron`
     iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH
     iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock
     iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
 
-    iniset $NEUTRON_CONF DEFAULT debug True
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
     iniset_rpc_backend neutron $NEUTRON_CONF
 
@@ -139,7 +139,7 @@
 
         cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
 
-        iniset $NEUTRON_CONF DEFAULT core_plugin ml2
+        iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
 
         iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
         iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
@@ -147,10 +147,6 @@
         iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
         configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken
 
-        # Configuration for neutron notifations to nova.
-        iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
-        iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-
         iniset $NEUTRON_CONF nova auth_type password
         iniset $NEUTRON_CONF nova auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
         iniset $NEUTRON_CONF nova username nova
@@ -162,33 +158,37 @@
 
         # Configure VXLAN
         # TODO(sc68cal) not hardcode?
-        iniset $NEUTRON_PLUGIN_CONF ml2 tenant_network_types vxlan
-        iniset $NEUTRON_PLUGIN_CONF ml2 type_drivers vxlan
-        iniset $NEUTRON_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
-        iniset $NEUTRON_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
-        iniset $NEUTRON_PLUGIN_CONF ml2 extension_drivers port_security
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 type_drivers vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
+        if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security
+        fi
     fi
 
     # Neutron OVS or LB agent
     if is_service_enabled neutron-agent; then
-        iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan
-        iniset $NEUTRON_PLUGIN_CONF DEFAULT debug True
+        iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
         # Configure the neutron agent
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_PLUGIN_CONF securitygroup iptables
-            iniset $NEUTRON_PLUGIN_CONF vxlan local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables
+            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
         else
-            iniset $NEUTRON_PLUGIN_CONF securitygroup iptables_hybrid
-            iniset $NEUTRON_PLUGIN_CONF ovs local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables_hybrid
+            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
         fi
+
+        enable_kernel_bridge_firewall
     fi
 
     # DHCP Agent
     if is_service_enabled neutron-dhcp; then
         cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF
 
-        iniset $NEUTRON_DHCP_CONF DEFAULT debug True
+        iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         # make it so we have working DNS from guests
         iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
 
@@ -200,9 +200,9 @@
     if is_service_enabled neutron-l3; then
         cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
         iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        iniset $NEUTRON_CONF DEFAULT service_plugins router
+        neutron_service_plugin_class_add router
         iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
-        iniset $NEUTRON_L3_CONF DEFAULT debug True
+        iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
     fi
 
@@ -210,7 +210,7 @@
     if is_service_enabled neutron-metadata-agent; then
         cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
 
-        iniset $NEUTRON_META_CONF DEFAULT debug True
+        iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         iniset $NEUTRON_META_CONF DEFAULT nova_metadata_ip $SERVICE_HOST
         iniset $NEUTRON_META_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
 
@@ -249,14 +249,8 @@
         source $TOP_DIR/lib/neutron_plugins/services/metering
         neutron_agent_metering_configure_common
         neutron_agent_metering_configure_agent
-        # TODO(sc68cal) hack because we don't pass around
-        # $Q_SERVICE_PLUGIN_CLASSES like -legacy does
-        local plugins=""
-        plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
-        plugins+=",metering"
-        iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
+        neutron_service_plugin_class_add metering
     fi
-
 }
 
 # configure_neutron_rootwrap() - configure Neutron's rootwrap
@@ -305,6 +299,9 @@
 
     iniset $NOVA_CONF DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
 
+    # optionally set options in nova_conf
+    neutron_plugin_create_nova_conf
+
     if is_service_enabled neutron-metadata-agent; then
         iniset $NOVA_CONF neutron service_metadata_proxy "True"
     fi
@@ -394,7 +391,7 @@
 
     # Start the Neutron service
     # TODO(sc68cal) Stop hard coding this
-    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_PLUGIN_CONF"
+    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF"
 
     if is_ssl_enabled_service "neutron"; then
         ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
@@ -409,7 +406,7 @@
 
     # Start proxy if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT &
+        start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
     fi
 }
 
@@ -430,15 +427,17 @@
     if is_service_enabled neutron-l3; then
         run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG"
     fi
-    # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
-    # of the code in lib/neutron_plugins/services/l3
-    if type -p neutron_plugin_create_initial_networks > /dev/null; then
-        neutron_plugin_create_initial_networks
-    else
-        # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
-        source $TOP_DIR/lib/neutron_plugins/services/l3
-        # Create the networks using servic
-        create_neutron_initial_network
+    if is_service_enabled neutron-api; then
+        # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
+        # of the code in lib/neutron_plugins/services/l3
+        if type -p neutron_plugin_create_initial_networks > /dev/null; then
+            neutron_plugin_create_initial_networks
+        else
+            # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
+            source $TOP_DIR/lib/neutron_plugins/services/l3
+            # Create the networks using the built-in l3 service code
+            create_neutron_initial_network
+        fi
     fi
     if is_service_enabled neutron-metadata-agent; then
         run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG"
@@ -472,9 +471,9 @@
 
     NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF"
 
-    #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_PLUGIN_CONF (ml2_conf.ini) but others may not
+    #TODO(sc68cal) OVS and LB agents use settings in NEUTRON_CORE_PLUGIN_CONF (ml2_conf.ini) but others may not
     if is_service_enabled neutron-agent; then
-        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_PLUGIN_CONF"
+        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CORE_PLUGIN_CONF"
     fi
 
     if is_service_enabled neutron-dhcp; then
@@ -491,6 +490,16 @@
 
 }
 
+# neutron_service_plugin_class_add() - add service plugin class
+function neutron_service_plugin_class_add_new {
+    local service_plugin_class=$1
+    local plugins=""
+
+    plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
+    plugins+=",${service_plugin_class}"
+    iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
+}
+
 # Dispatch functions
 # These are needed for compatibility between the old and new implementations
 # where there are function name overlaps.  These will be removed when
@@ -550,6 +559,15 @@
     fi
 }
 
+function neutron_service_plugin_class_add {
+    if is_neutron_legacy_enabled; then
+        # Call back to old function
+        _neutron_service_plugin_class_add "$@"
+    else
+        neutron_service_plugin_class_add_new "$@"
+    fi
+}
+
 function start_neutron {
     if is_neutron_legacy_enabled; then
         # Call back to old function
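neutron_service_plugin_class_add is now the public way to append to service_plugins; the dispatch wrapper picks the legacy or new implementation, so callers do not need to care which code path is active. A sketch of how a DevStack plugin might use it, mirroring the in-tree router/metering calls above:

    # e.g. in a plugin's post-config phase
    if is_service_enabled neutron-api q-svc; then
        neutron_service_plugin_class_add router
    fi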
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 25fb6b7..613e0f1 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -24,11 +24,9 @@
 # - check_neutron_third_party_integration
 # - start_neutron_agents
 # - create_neutron_initial_network
-# - setup_neutron_debug
 #
 # ``unstack.sh`` calls the entry points in this order:
 #
-# - teardown_neutron_debug
 # - stop_neutron
 # - stop_neutron_third_party
 # - cleanup_neutron
@@ -74,7 +72,6 @@
 
 NEUTRON_DIR=$DEST/neutron
 NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # Support entry points installation of console scripts
@@ -88,9 +85,6 @@
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
 
-# Default provider for load balancer service
-DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
-
 # Agent binaries.  Note, binary paths for other agents are set in per-service
 # scripts in lib/neutron_plugins/services/
 AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
@@ -129,8 +123,6 @@
 Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-# The name of the default q-l3 router
-Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
 Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
 VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
@@ -203,7 +195,7 @@
 # agent, as described below.
 #
 # Example: ``PHYSICAL_NETWORK=default``
-PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
 
 # With the openvswitch agent, if using VLANs for tenant networks,
 # or if using flat or VLAN provider networks, set in ``localrc`` to
@@ -213,15 +205,17 @@
 # port for external connectivity.
 #
 # Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
-OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-}
+OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
 
+default_route_dev=$(ip route | grep ^default | awk '{print $5}')
+die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
 # With the linuxbridge agent, if using VLANs for tenant networks,
 # or if using flat or VLAN provider networks, set in ``localrc`` to
 # the name of the network interface to use for the physical
 # network.
 #
 # Example: ``LB_PHYSICAL_INTERFACE=eth1``
-LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-}
+LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-$default_route_dev}
 
 # When Neutron tunnels are enabled it is needed to specify the
 # IP address of the end point in the local server. This IP is set
@@ -252,12 +246,6 @@
     source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
 fi
 
-# Agent loadbalancer service plugin functions
-# -------------------------------------------
-
-# Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/neutron_plugins/services/loadbalancer
-
 # Agent metering service plugin functions
 # -------------------------------------------
 
@@ -316,10 +304,6 @@
     iniset_rpc_backend neutron $NEUTRON_CONF
 
     # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES
-    if is_service_enabled q-lbaas; then
-        deprecated "Configuring q-lbaas through devstack is deprecated"
-        _configure_neutron_lbaas
-    fi
     if is_service_enabled q-metering; then
         _configure_neutron_metering
     fi
@@ -418,10 +402,6 @@
 
     git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
     setup_develop $NEUTRON_DIR
-    if is_service_enabled q-lbaas; then
-        git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH
-        setup_develop $NEUTRON_LBAAS_DIR
-    fi
 
     if [ "$VIRT_DRIVER" == 'xenserver' ]; then
         local dom0_ip
@@ -452,10 +432,6 @@
     if is_service_enabled q-agt q-dhcp q-l3; then
         neutron_plugin_install_agent_packages
     fi
-
-    if is_service_enabled q-lbaas; then
-        neutron_agent_lbaas_install_agent_packages
-    fi
 }
 
 # Start running processes, including screen
@@ -482,7 +458,7 @@
 
     # Start proxy if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT &
+        start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
     fi
 }
 
@@ -516,7 +492,6 @@
     fi
 
     run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
-    run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file $LBAAS_AGENT_CONF_FILENAME"
     run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
 
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
@@ -556,9 +531,6 @@
         stop_process q-meta
     fi
 
-    if is_service_enabled q-lbaas; then
-        neutron_lbaas_stop
-    fi
     if is_service_enabled q-metering; then
         neutron_metering_stop
     fi
@@ -662,7 +634,7 @@
     fi
 
     # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
+    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
         sudo ip netns delete ${ns}
     done
 }
@@ -818,18 +790,6 @@
     iniset $NEUTRON_CONF oslo_messaging_notifications driver messaging
 }
 
-function _configure_neutron_lbaas {
-    # Uses oslo config generator to generate LBaaS sample configuration files
-    (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample ]; then
-        cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_CONF_DIR/neutron_lbaas.conf
-        iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers service_provider $DEFAULT_LB_PROVIDER
-    fi
-    neutron_agent_lbaas_configure_common
-    neutron_agent_lbaas_configure_agent
-}
-
 function _configure_neutron_metering {
     neutron_agent_metering_configure_common
     neutron_agent_metering_configure_agent
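The new defaults in this file (PHYSICAL_NETWORK=public, OVS_PHYSICAL_BRIDGE=br-ex, LB_PHYSICAL_INTERFACE taken from the default route device) make provider-style settings work out of the box, but they can still be pinned in local.conf when the detected route device is not the wanted interface. An example using the values from the comments above, with eth1 as an assumed second NIC:

    [[local|localrc]]
    PHYSICAL_NETWORK=default
    OVS_PHYSICAL_BRIDGE=br-eth1
    LB_PHYSICAL_INTERFACE=eth1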
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 7d59e13..d0de2f5 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -69,6 +69,7 @@
     fi
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+        enable_kernel_bridge_firewall
     else
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 7e80209..e429714 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -35,7 +35,11 @@
 Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES}
 # List of extension drivers to load, use '-' instead of ':-' to allow people to
 # explicitly override this to blank
-Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+else
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-}
+fi
 
 # L3 Plugin to load for ML2
 # For some flat network environment, they not want to extend L3 plugin.
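Since the port_security extension driver is now gated on NEUTRON_PORT_SECURITY (and Q_ML2_PLUGIN_EXT_DRIVERS still honours an explicit override, including blank via the '-' expansion), dropping the driver is a one-line change, assuming nothing else in the deployment relies on port security:

    [[local|localrc]]
    NEUTRON_PORT_SECURITY=False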
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index f6d10ea..baf7d7f 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -83,9 +83,10 @@
 
 function _neutron_ovs_base_configure_firewall_driver {
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid
+        enable_kernel_bridge_firewall
     else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop
     fi
 }
 
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index a4e7248..569a366 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -17,6 +17,14 @@
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
 PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
 
+# If Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=True, assign the gateway IP of the public
+# subnet to the public bridge interface even if Q_USE_PROVIDERNET_FOR_PUBLIC is
+# used.
+Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True}
+
+# The name of the default router
+Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
+
 # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of
 # PUBLIC_BRIDGE.  This is intended to be used with
 # Q_USE_PROVIDERNET_FOR_PUBLIC=True.
@@ -51,7 +59,7 @@
 #    Q_USE_PROVIDERNET_FOR_PUBLIC=True
 #    PUBLIC_PHYSICAL_NETWORK=public
 #    OVS_BRIDGE_MAPPINGS=public:br-ex
-Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False}
+Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
 PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public}
 
 # Generate 40-bit IPv6 Global ID to comply with RFC 4193
@@ -62,27 +70,36 @@
 IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac}
 IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet}
 IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet}
-FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64}
-IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1}
+IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56}
+# if the safe-to-use range is larger than a /64, we only use the first /64 to
+# avoid the side effects outlined in rfc7421
+FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }')}
+IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-}
 IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64}
 IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2}
 IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1}
 
 # Gateway and subnet defaults, in case they are not customized in localrc
-NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
-PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1}
+NETWORK_GATEWAY=${NETWORK_GATEWAY:-}
+PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-}
 PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"}
 PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
 
 # Subnetpool defaults
+USE_SUBNETPOOL=${USE_SUBNETPOOL:-True}
 SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"}
 
-SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/8}
-SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48}
+SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}
+SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}
 
-SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24}
+SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26}
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
+default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
+die_if_not_set $LINENO default_v4_route_devs "Failure retrieving default IPv4 route devices"
+
+default_v6_route_devs=$(ip -6 route | grep ^default | awk '{print $5}')
+
 function _determine_config_l3 {
     local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
     echo "$opts"
@@ -113,9 +130,9 @@
             _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
         fi
     else
-        local default_dev=""
-        default_dev=$(ip route | grep ^default | awk '{print $5}')
-        sudo iptables -t nat -A POSTROUTING -o $default_dev -s $FLOATING_RANGE -j MASQUERADE
+        for d in $default_v4_route_devs; do
+            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+        done
     fi
 }
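
As a hedged illustration of the per-device masquerade loop above: on a host whose only default IPv4 route leaves via eth0 (device name illustrative, floating range shown with its usual DevStack default), the loop collapses to a single rule:

    $ ip -4 route | grep ^default | awk '{print $5}'
    eth0
    # so the loop issues:
    sudo iptables -t nat -A POSTROUTING -o eth0 -s 172.24.4.0/24 -j MASQUERADE
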
 
@@ -149,21 +166,38 @@
         neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK
     fi
 
+    if is_networking_extension_supported "auto-allocated-topology"; then
+        if [[ "$USE_SUBNETPOOL" == "True" ]]; then
+            if [[ "$IP_VERSION" =~ 4.* ]]; then
+                SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2)
+            fi
+            if [[ "$IP_VERSION" =~ .*6 ]]; then
+                SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2)
+            fi
+        fi
+    fi
+
     if is_provider_network; then
         die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
         die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
-        NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create $PHYSICAL_NETWORK --tenant_id $project_id --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
-            SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2)
+            if [ -z $SUBNETPOOL_V4_ID ]; then
+                fixed_range_v4=$FIXED_RANGE
+            fi
+            SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
         fi
 
         if [[ "$IP_VERSION" =~ .*6 ]]; then
             die_if_not_set $LINENO IPV6_PROVIDER_FIXED_RANGE "IPV6_PROVIDER_FIXED_RANGE has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6"
             die_if_not_set $LINENO IPV6_PROVIDER_NETWORK_GATEWAY "IPV6_PROVIDER_NETWORK_GATEWAY has not been set, but Q_USE_PROVIDERNET_FOR_PUBLIC is true and IP_VERSION includes 6"
-            SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME $NET_ID $IPV6_PROVIDER_FIXED_RANGE | grep 'id' | get_field 2)
+            if [ -z $SUBNETPOOL_V6_ID ]; then
+                fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
+            fi
+            SUBNET_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID $fixed_range_v6 | grep 'id' | get_field 2)
             die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
         fi
 
@@ -173,7 +207,7 @@
             sudo ip link set $PUBLIC_INTERFACE up
         fi
     else
-        NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create --tenant-id $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -191,29 +225,23 @@
         # Create a router, and add the private subnet as one of its interfaces
         if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
             # create a tenant-owned router.
-            ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create --tenant-id $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
         else
             # Plugin only supports creating a single router, which should be admin owned.
-            ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
         fi
 
-        EXTERNAL_NETWORK_FLAGS="--router:external"
-        if is_networking_extension_supported "auto-allocated-topology" && is_networking_extension_supported "subnet_allocation"; then
-            EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default"
-            if [[ "$IP_VERSION" =~ 4.* ]]; then
-                SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2)
-            fi
-            if [[ "$IP_VERSION" =~ .*6 ]]; then
-                SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2)
-            fi
+        EXTERNAL_NETWORK_FLAGS="--external"
+        if is_networking_extension_supported "auto-allocated-topology"; then
+            EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default"
         fi
         # Create an external network, and a subnet. Configure the external network as router gw
         if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
-            EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
         else
-            EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
         fi
         die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
 
@@ -232,13 +260,19 @@
 # Create private IPv4 subnet
 function _neutron_create_private_subnet_v4 {
     local project_id=$1
-    local subnet_params="--tenant-id $project_id "
-    subnet_params+="--ip_version 4 "
-    subnet_params+="--gateway $NETWORK_GATEWAY "
-    subnet_params+="--name $PRIVATE_SUBNET_NAME "
-    subnet_params+="$NET_ID $FIXED_RANGE"
+    if [ -z $SUBNETPOOL_V4_ID ]; then
+        fixed_range_v4=$FIXED_RANGE
+    fi
+    local subnet_params="--project $project_id "
+    subnet_params+="--ip-version 4 "
+    if [[ -n "$NETWORK_GATEWAY" ]]; then
+        subnet_params+="--gateway $NETWORK_GATEWAY "
+    fi
+    subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
+    subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
+    subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
     local subnet_id
-    subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2)
+    subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
     echo $subnet_id
 }
@@ -249,47 +283,53 @@
     die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set"
     die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set"
     local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE"
-    local subnet_params="--tenant-id $project_id "
-    subnet_params+="--ip_version 6 "
-    subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
-    subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
-    subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes"
+    if [ -z $SUBNETPOOL_V6_ID ]; then
+        fixed_range_v6=$FIXED_RANGE_V6
+    fi
+    local subnet_params="--project $project_id "
+    subnet_params+="--ip-version 6 "
+    if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
+        subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
+    fi
+    subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} "
+    subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6 $ipv6_modes} "
+    subnet_params+="--network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
     local ipv6_subnet_id
-    ipv6_subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2)
+    ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
     echo $ipv6_subnet_id
 }
 
 # Create public IPv4 subnet
 function _neutron_create_public_subnet_v4 {
-    local subnet_params+="--ip_version 4 "
+    local subnet_params="--ip-version 4 "
     subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} "
-    subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
-    subnet_params+="--name $PUBLIC_SUBNET_NAME "
-    subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
-    subnet_params+="-- --enable_dhcp=False"
+    if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then
+        subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
+    fi
+    subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp "
+    subnet_params+="$PUBLIC_SUBNET_NAME"
     local id_and_ext_gw_ip
-    id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
     echo $id_and_ext_gw_ip
 }
 
 # Create public IPv6 subnet
 function _neutron_create_public_subnet_v6 {
-    local subnet_params="--ip_version 6 "
+    local subnet_params="--ip-version 6 "
     subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY "
-    subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
-    subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
-    subnet_params+="-- --enable_dhcp=False"
+    subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp "
+    subnet_params+="$IPV6_PUBLIC_SUBNET_NAME"
     local ipv6_id_and_ext_gw_ip
-    ipv6_id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
     echo $ipv6_id_and_ext_gw_ip
 }
 
 # Configure neutron router for IPv4 public access
 function _neutron_configure_router_v4 {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $SUBNET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
     # Create a public subnet on the external network
     local id_and_ext_gw_ip
     id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
@@ -320,13 +360,12 @@
             local cidr_len=${FLOATING_RANGE#*/}
             local testcmd="ip -o link | grep -q $ext_gw_interface"
             test_with_retry "$testcmd" "$ext_gw_interface creation failed"
-            if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then
+            if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" || $Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE == "True" ) ]]; then
                 sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
                 sudo ip link set $ext_gw_interface up
             fi
-            ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+            ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
             die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
-            sudo ip route replace  $FIXED_RANGE via $ROUTER_GW_IP
         fi
         _neutron_set_router_id
     fi
@@ -334,7 +373,7 @@
 
 # Configure neutron router for IPv6 public access
 function _neutron_configure_router_v6 {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
     # Create a public subnet on the external network
     local ipv6_id_and_ext_gw_ip
     ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
@@ -351,11 +390,21 @@
 
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+        # Ensure IPv6 RAs are accepted on interfaces with a default route.
+        # This is needed for neutron-based devstack clouds to work in
+        # IPv6-only clouds in the gate. Please do not remove this without
+        # talking to folks in Infra.
+        for d in $default_v6_route_devs; do
+            # Slash notation must be used in this sysctl command because route
+            # devices can have dots in their names; with dot notation, the dots in
+            # the device name would also be treated as key separators, causing an error.
+            sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
+        done
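
(Illustrative only, assuming a VLAN sub-interface named eth0.10: slash notation keeps the dots as part of the device name, whereas dot notation would split them into path components.)

    sudo sysctl -w net/ipv6/conf/eth0.10/accept_ra=2   # works: dots stay in the device name
    sudo sysctl -w net.ipv6.conf.eth0.10.accept_ra=2   # fails: "eth0" and "10" are parsed as separate keys
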
         # Ensure IPv6 forwarding is enabled on the host
         sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
-        IPV6_ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+        IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
         die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
         if is_neutron_ovs_base_plugin; then
@@ -365,22 +414,19 @@
 
             # Configure interface for public bridge
             sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
-            sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
+            local replace_range=${SUBNETPOOL_PREFIX_V6}
+            if [[ -z "${SUBNETPOOL_V6_ID}" ]]; then
+                replace_range=${FIXED_RANGE_V6}
+            fi
+            sudo ip -6 route replace $replace_range via $IPV6_ROUTER_GW_IP dev $ext_gw_interface
         fi
         _neutron_set_router_id
     fi
 }
 
-function is_provider_network {
-    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
-        return 0
-    fi
-    return 1
-}
-
 function is_networking_extension_supported {
     local extension=$1
     # TODO(sc68cal) cache this instead of calling every time
-    EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value)
+    EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
     [[ $EXT_LIST =~ $extension ]] && return 0
 }
diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer
deleted file mode 100644
index 30e9480..0000000
--- a/lib/neutron_plugins/services/loadbalancer
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-
-# Neutron loadbalancer plugin
-# ---------------------------
-
-# Save trace setting
-_XTRACE_NEUTRON_LB=$(set +o | grep xtrace)
-set +o xtrace
-
-
-AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent"
-LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin
-
-function neutron_agent_lbaas_install_agent_packages {
-    if is_ubuntu || is_fedora || is_suse; then
-        install_package haproxy
-    fi
-}
-
-function neutron_agent_lbaas_configure_common {
-    _neutron_service_plugin_class_add $LBAAS_PLUGIN
-    _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR
-}
-
-function neutron_agent_lbaas_configure_agent {
-    LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy
-    mkdir -p $LBAAS_AGENT_CONF_PATH
-
-    LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini"
-
-    cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME
-
-    # ovs_use_veth needs to be set before the plugin configuration
-    # occurs to allow plugins to override the setting.
-    iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
-    neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME
-
-    if is_fedora; then
-        iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody"
-        iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody"
-    fi
-}
-
-function neutron_lbaas_stop {
-    pids=$(ps aux | awk '/haproxy/ { print $2 }')
-    [ ! -z "$pids" ] && sudo kill $pids || true
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON_LB
diff --git a/lib/nova b/lib/nova
index e187220..ca9a6c7 100644
--- a/lib/nova
+++ b/lib/nova
@@ -83,7 +83,10 @@
 
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
-FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"}
+FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
+
+# Option to initialize CellsV2 environment
+NOVA_CONFIGURE_CELLSV2=$(trueorfalse False NOVA_CONFIGURE_CELLSV2)
 
 # Nova supports pluggable schedulers.  The default ``FilterScheduler``
 # should work in most cases.
@@ -299,8 +302,6 @@
     # Put config files in ``/etc/nova`` for everyone to find
     sudo install -d -o $STACK_USER $NOVA_CONF_DIR
 
-    install_default_policy nova
-
     configure_rootwrap nova
 
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
@@ -458,7 +459,6 @@
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
     iniset $NOVA_CONF DEFAULT scheduler_driver "$SCHEDULER"
     iniset $NOVA_CONF DEFAULT scheduler_default_filters "$FILTERS"
-    iniset $NOVA_CONF DEFAULT force_dhcp_release "True"
     iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
     iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST"
     iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT"
@@ -554,7 +554,6 @@
         # For multi-host, this should be the management ip of the compute host.
         VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
         VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        iniset $NOVA_CONF vnc enabled true
         iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN"
         iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
         iniset $NOVA_CONF vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
@@ -572,8 +571,6 @@
         iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN"
         iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
         iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
-    else
-        iniset $NOVA_CONF spice enabled false
     fi
 
     # Set the oslo messaging driver to the typical default. This does not
@@ -682,10 +679,15 @@
     # All nova components talk to a central database.
     # Only do this step once on the API node for an entire cluster.
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
-        # (Re)create nova database
+        # (Re)create nova databases
         recreate_database nova
+        if [ "$NOVA_CONFIGURE_CELLSV2" != "False" ]; then
+            recreate_database nova_api_cell0
+        fi
 
-        # Migrate nova database
+        # Migrate nova database. If "nova-manage cell_v2 simple_cell_setup" has
+        # been run, this migrates both the "nova" and "nova_api_cell0" databases.
+        # Otherwise it just migrates the "nova" database.
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
 
         if is_service_enabled n-cell; then
@@ -800,7 +802,7 @@
 
     # Start proxies if enabled
     if is_service_enabled tls-proxy; then
-        start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT &
+        start_tls_proxy nova '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT
     fi
 
     export PATH=$old_path
@@ -823,6 +825,10 @@
         # ``sg`` is used in run_process to execute nova-compute as a member of the
         # **$LIBVIRT_GROUP** group.
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
+    elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP
+    elif [[ "$VIRT_DRIVER" = 'docker' ]]; then
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
         local i
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
@@ -860,9 +866,13 @@
     run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
     run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
     run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
-
     run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
+
+    if is_service_enabled n-net; then
+        enable_kernel_bridge_firewall
+    fi
     run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
     run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
 
@@ -938,6 +948,15 @@
     fi
 }
 
+# create_cell(): Group the available hosts into a cell
+function create_cell {
+    if ! is_service_enabled n-cell; then
+        nova-manage cell_v2 simple_cell_setup --transport-url $(get_transport_url)
+    else
+        echo 'Skipping cellsv2 setup for this cellsv1 configuration'
+    fi
+}
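
For reference, a hedged sketch of what create_cell amounts to on a non-cellsv1 deployment; the transport URL shown is a placeholder for what get_transport_url would return:

    nova-manage cell_v2 simple_cell_setup \
        --transport-url rabbit://stackrabbit:secretrabbit@192.168.1.10:5672/

Setting NOVA_CONFIGURE_CELLSV2=True in localrc additionally makes the database setup above recreate the "nova_api_cell0" database, per the earlier hunk.
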
+
 # Restore xtrace
 $_XTRACE_LIB_NOVA
 
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 4e5a748..5e7695a 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -23,12 +23,7 @@
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
     if is_ubuntu; then
-        if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then
-            install_package qemu-system
-        else
-            install_package qemu-kvm
-            install_package libguestfs0
-        fi
+        install_package qemu-system
         install_package libvirt-bin libvirt-dev
         pip_install_gr libvirt-python
         if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then
@@ -65,6 +60,7 @@
     "/dev/random", "/dev/urandom",
     "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
     "/dev/rtc", "/dev/hpet","/dev/net/tun",
+    "/dev/vfio/vfio",
 ]
 EOF
     fi
@@ -124,6 +120,12 @@
     # Service needs to be started on redhat/fedora -- do a restart for
     # sanity after fiddling the config.
     restart_service $LIBVIRT_DAEMON
+
+    # Restart virtlogd companion service to ensure it is running properly
+    #  https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455
+    #  https://bugzilla.redhat.com/show_bug.cgi?id=1290357
+    # (not all platforms have it; libvirt 1.3+ only, thus the ignore)
+    restart_service virtlogd || true
 }
 
 
diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake
index 6ac2199..f9b95c1 100644
--- a/lib/nova_plugins/hypervisor-fake
+++ b/lib/nova_plugins/hypervisor-fake
@@ -45,7 +45,7 @@
     iniset $NOVA_CONF DEFAULT quota_fixed_ips -1
     iniset $NOVA_CONF DEFAULT quota_metadata_items -1
     iniset $NOVA_CONF DEFAULT quota_injected_files -1
-    iniset $NOVA_CONF DEFAULT quota_injected_file_path_bytes -1
+    iniset $NOVA_CONF DEFAULT quota_injected_file_path_length -1
     iniset $NOVA_CONF DEFAULT quota_security_groups -1
     iniset $NOVA_CONF DEFAULT quota_security_group_rules -1
     iniset $NOVA_CONF DEFAULT quota_key_pairs -1
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index c40427c..7ffd14d 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -45,11 +45,13 @@
     iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
     iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     # ironic section
-    iniset $NOVA_CONF ironic admin_username admin
-    iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD
-    iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_URI/v2.0
-    iniset $NOVA_CONF ironic admin_tenant_name demo
-    iniset $NOVA_CONF ironic api_endpoint $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1
+    iniset $NOVA_CONF ironic auth_type password
+    iniset $NOVA_CONF ironic username admin
+    iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
+    iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI/v3
+    iniset $NOVA_CONF ironic project_domain_id default
+    iniset $NOVA_CONF ironic user_domain_id default
+    iniset $NOVA_CONF ironic project_name demo
 }
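
For readers following the Keystone v3 switch, the iniset calls above render an [ironic] section of nova.conf roughly as follows (variables left unexpanded; actual values depend on the deployment):

    [ironic]
    auth_type = password
    username = admin
    password = $ADMIN_PASSWORD
    auth_url = $KEYSTONE_AUTH_URI/v3
    project_domain_id = default
    user_domain_id = default
    project_name = demo
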
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 20dde8e..167ab6f 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -40,7 +40,8 @@
     configure_libvirt
     iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
     iniset $NOVA_CONF libvirt cpu_mode "none"
-    iniset $NOVA_CONF libvirt use_usb_tablet "False"
+    # Do not enable USB tablet input devices to avoid QEMU CPU overhead.
+    iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
     iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
     iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
     iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
@@ -100,6 +101,14 @@
             yum_install libcgroup-tools
         fi
     fi
+
+    if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then
+        if is_ubuntu; then
+            install_package python-guestfs
+        elif is_fedora || is_suse; then
+            install_package python-libguestfs
+        fi
+    fi
 }
 
 # start_nova_hypervisor - Start any required external services
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index e75226a..e5d25da 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -87,6 +87,7 @@
         cat $TOP_DIR/tools/xen/functions
         echo "create_directory_for_images"
         echo "create_directory_for_kernels"
+        echo "install_conntrack_tools"
     } | $ssh_dom0
 
 }
diff --git a/lib/placement b/lib/placement
new file mode 100644
index 0000000..165c670
--- /dev/null
+++ b/lib/placement
@@ -0,0 +1,193 @@
+#!/bin/bash
+#
+# lib/placement
+# Functions to control the configuration and operation of the **Placement** service
+#
+# Currently the placement service is embedded in nova. Eventually we
+# expect this to change so this file is started as a separate entity
+# despite making use of some *NOVA* variables and files.
+
+# Dependencies:
+#
+# - ``functions`` file
+# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined
+# - ``FILES``
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - install_placement
+# - cleanup_placement
+# - configure_placement
+# - init_placement
+# - start_placement
+# - stop_placement
+
+# Save trace setting
+_XTRACE_LIB_PLACEMENT=$(set +o | grep xtrace)
+set +o xtrace
+
+# Defaults
+# --------
+
+PLACEMENT_CONF_DIR=/etc/nova
+PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf
+PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement}
+
+
+# The placement service can optionally use a separate database
+# connection. Set PLACEMENT_DB_ENABLED to True to use it.
+# NOTE(cdent): This functionality depends on some code that is not
+# yet merged in nova but is coming soon.
+PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED)
+
+if is_ssl_enabled_service "placement-api" || is_service_enabled tls-proxy; then
+    PLACEMENT_SERVICE_PROTOCOL="https"
+fi
+
+# Public facing bits
+PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
+PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST}
+PLACEMENT_SERVICE_PORT=${PLACEMENT_SERVICE_PORT:-8778}
+
+# Functions
+# ---------
+
+# Test if any placement services are enabled
+# is_placement_enabled
+function is_placement_enabled {
+    [[ ,${ENABLED_SERVICES} =~ ,"placement-" ]] && return 0
+    return 1
+}
+
+# cleanup_placement() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_placement {
+    sudo rm -f $(apache_site_config_for placement-api)
+}
+
+# _config_placement_apache_wsgi() - Set WSGI config files
+function _config_placement_apache_wsgi {
+    local placement_api_apache_conf
+    local placement_api_port=$PLACEMENT_SERVICE_PORT
+    local venv_path=""
+    local nova_bin_dir=""
+    nova_bin_dir=$(get_python_exec_prefix)
+    placement_api_apache_conf=$(apache_site_config_for placement-api)
+
+    # reuse nova's cert if a cert is being used
+    if is_ssl_enabled_service "placement-api"; then
+        placement_ssl="SSLEngine On"
+        placement_certfile="SSLCertificateFile $NOVA_SSL_CERT"
+        placement_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY"
+    fi
+    # reuse nova's venv if there is one, as the placement code lives there
+    if [[ ${USE_VENV} = True ]]; then
+        venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
+        nova_bin_dir=${PROJECT_VENV["nova"]}/bin
+    fi
+
+    sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
+    sudo sed -e "
+        s|%PUBLICPORT%|$placement_api_port|g;
+        s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g;
+        s|%SSLENGINE%|$placement_ssl|g;
+        s|%SSLCERTFILE%|$placement_certfile|g;
+        s|%SSLKEYFILE%|$placement_keyfile|g;
+        s|%USER%|$STACK_USER|g;
+        s|%VIRTUALENV%|$venv_path|g
+        s|%APIWORKERS%|$API_WORKERS|g
+    " -i $placement_api_apache_conf
+}
+
+# configure_placement() - Set config files, create data dirs, etc
+function configure_placement {
+    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
+        iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
+    fi
+
+    iniset $NOVA_CONF placement auth_type "password"
+    iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
+    iniset $NOVA_CONF placement username placement
+    iniset $NOVA_CONF placement password "$SERVICE_PASSWORD"
+    iniset $NOVA_CONF placement user_domain_name "Default"
+    iniset $NOVA_CONF placement project_name "$SERVICE_TENANT_NAME"
+    iniset $NOVA_CONF placement project_domain_name "Default"
+    iniset $NOVA_CONF placement os_region_name "$REGION_NAME"
+    # TODO(cdent): auth_strategy, which is common to see in these
+    # blocks, is not currently used here. For the time being the
+    # placement api uses the auth_strategy configuration setting
+    # established by the nova api. This avoids creating redundant
+    # configuration items that are only used for testing.
+
+    _config_placement_apache_wsgi
+}
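
As a sketch, configure_placement leaves a [placement] section in nova.conf along these lines (values unexpanded; the separate placement_database connection is only set when PLACEMENT_DB_ENABLED=True):

    [placement]
    auth_type = password
    auth_url = $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3
    username = placement
    password = $SERVICE_PASSWORD
    user_domain_name = Default
    project_name = $SERVICE_TENANT_NAME
    project_domain_name = Default
    os_region_name = $REGION_NAME
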
+
+# create_placement_accounts() - Set up required placement accounts
+# and service and endpoints.
+function create_placement_accounts {
+    create_service_user "placement" "admin"
+    local placement_api_url="$PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement"
+    get_or_create_service "placement" "placement" "Placement Service"
+    get_or_create_endpoint \
+        "placement" \
+        "$REGION_NAME" \
+        "$placement_api_url" \
+        "$placement_api_url" \
+        "$placement_api_url"
+}
+
+# init_placement() - Create service user and endpoints
+# If PLACEMENT_DB_ENABLED is true, create the separate placement db
+# using, for now, the api_db migrations.
+function init_placement {
+    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
+        recreate_database placement
+        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF api_db sync
+    fi
+    create_placement_accounts
+}
+
+# install_placement() - Collect source and prepare
+function install_placement {
+    install_apache_wsgi
+    if is_ssl_enabled_service "placement-api"; then
+        enable_mod_ssl
+    fi
+}
+
+# start_placement_api() - Start the API processes ahead of other things
+function start_placement_api {
+    # Get right service port for testing
+    local service_port=$PLACEMENT_SERVICE_PORT
+    local placement_api_port=$PLACEMENT_SERVICE_PORT
+
+    enable_apache_site placement-api
+    restart_apache_server
+    tail_log placement-api /var/log/$APACHE_NAME/placement-api.log
+
+    echo "Waiting for placement-api to start..."
+    if ! wait_for_service $SERVICE_TIMEOUT $PLACEMENT_SERVICE_PROTOCOL://$PLACEMENT_SERVICE_HOST/placement; then
+        die $LINENO "placement-api did not start"
+    fi
+}
+
+function start_placement {
+    start_placement_api
+}
+
+# stop_placement() - Disable the api service and stop it.
+function stop_placement {
+    disable_apache_site placement-api
+    restart_apache_server
+}
+
+# Restore xtrace
+$_XTRACE_LIB_PLACEMENT
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 0ee46dc..97b1aa4 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -24,6 +24,8 @@
 _XTRACE_RPC_BACKEND=$(set +o | grep xtrace)
 set +o xtrace
 
+RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
+
 # Functions
 # ---------
 
diff --git a/lib/swift b/lib/swift
index 0c74411..b175f2e 100644
--- a/lib/swift
+++ b/lib/swift
@@ -397,6 +397,9 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
 
+    # Versioned Writes
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true
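
The iniset above yields a proxy-server.conf fragment of the form:

    [filter:versioned_writes]
    allow_versioned_writes = true
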
+
     # Configure Ceilometer
     if is_service_enabled ceilometer; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
@@ -489,8 +492,6 @@
         generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container
         iniuncomment ${swift_node_config} DEFAULT bind_ip
         iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
-        iniuncomment ${swift_node_config} app:container-server allow_versions
-        iniset ${swift_node_config} app:container-server allow_versions  "true"
 
         swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
@@ -806,7 +807,7 @@
     done
     if is_service_enabled tls-proxy; then
         local proxy_port=${SWIFT_DEFAULT_BIND_PORT}
-        start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT &
+        start_tls_proxy swift '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT
     fi
     run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v"
     if [[ ${SWIFT_REPLICAS} == 1 ]]; then
diff --git a/lib/tempest b/lib/tempest
index 67fb454..5746ffc 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,7 +15,6 @@
 #   - ``SERVICE_HOST``
 #   - ``BASE_SQL_CONN`` ``lib/database`` declares
 #   - ``PUBLIC_NETWORK_NAME``
-#   - ``Q_ROUTER_NAME``
 #   - ``VIRT_DRIVER``
 #   - ``LIBVIRT_TYPE``
 #   - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
@@ -194,11 +193,11 @@
         available_flavors=$(nova flavor-list)
         if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
-                nova flavor-create m1.nano 42 64 0 1
+                openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano
             fi
             flavor_ref=42
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
-                nova flavor-create m1.micro 84 128 0 1
+                openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro
             fi
             flavor_ref_alt=84
         else
@@ -243,8 +242,7 @@
     # the public network (for floating ip access) is only available
     # if the extension is enabled.
     if is_networking_extension_supported 'external-net'; then
-        public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
-            awk '{print $2}')
+        public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
     fi
 
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -268,8 +266,7 @@
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
         iniset $TEMPEST_CONFIG auth admin_username $admin_username
         iniset $TEMPEST_CONFIG auth admin_password "$password"
-        iniset $TEMPEST_CONFIG auth admin_tenant_name $admin_project_name
-        iniset $TEMPEST_CONFIG auth admin_tenant_id $admin_project_id
+        iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name
         iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name
     fi
     if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
@@ -297,7 +294,6 @@
     fi
     if [ "$VIRT_DRIVER" = "xenserver" ]; then
         iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
-        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
     fi
 
     # Image Features
@@ -307,17 +303,12 @@
     fi
 
     # Compute
-    iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute image_ref $image_uuid
     iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt
-    iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${ALT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
-    iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
-    # set the equiv validation option here as well to ensure they are
-    # in sync. They shouldn't be separate options.
     iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
-    if [[ ! $(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then
+    if ! is_service_enabled n-cell && ! is_service_enabled neutron; then
         iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
     fi
 
@@ -354,6 +345,8 @@
         iniset $TEMPEST_CONFIG compute max_microversion $tempest_compute_max_microversion
     fi
 
+    # TODO(mriedem): Remove allow_port_security_disabled after liberty-eol.
+    iniset $TEMPEST_CONFIG compute-feature-enabled allow_port_security_disabled True
     iniset $TEMPEST_CONFIG compute-feature-enabled personality ${ENABLE_FILE_INJECTION:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled resize True
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False}
@@ -386,14 +379,10 @@
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
 
     # Orchestration Tests
     if is_service_enabled heat; then
-        # Though this is not needed by heat, some tempest tests explicitly
-        # try to set this role. Removing them from the tempest tests breaks
-        # some non-devstack CIs.
-        get_or_create_role "heat_stack_owner"
-
         if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
             iniset $TEMPEST_CONFIG orchestration image_ref $(basename "${HEAT_CFN_IMAGE_URL%.*}")
         fi
@@ -402,31 +391,41 @@
             # build a specialized heat flavor
             available_flavors=$(nova flavor-list)
             if [[ ! ( $available_flavors =~ 'm1.heat' ) ]]; then
-                nova flavor-create m1.heat 451 512 0 1
+                openstack flavor create --id 451 --ram 512 --disk 0 --vcpus 1 m1.heat
             fi
             iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat"
         fi
         iniset $TEMPEST_CONFIG orchestration build_timeout 900
-        iniset $TEMPEST_CONFIG orchestration stack_owner_role "heat_stack_owner"
+        iniset $TEMPEST_CONFIG orchestration stack_owner_role Member
     fi
 
     # Scenario
-    SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+    if [ "$VIRT_DRIVER" = "xenserver" ]; then
+        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
+        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz"
+        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
+        iniset $TEMPEST_CONFIG scenario img_container_format ovf
+    else
+        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
+    fi
     iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR
+    iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE
     iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img"
     iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd"
     iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz"
-    iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
 
-    # Telemetry
-    iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True"
-
+    # If using provider networking, use the physical network for validation rather than private
+    TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
+    if is_provider_network; then
+        TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK
+    fi
     # Validation
     iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False}
     iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
     iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
-    iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME
+    iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
 
     # Volume
     # TODO(obutenko): Remove snapshot_backup when liberty-eol happens.
diff --git a/lib/tls b/lib/tls
index ca57ed4..57b5e52 100644
--- a/lib/tls
+++ b/lib/tls
@@ -16,7 +16,6 @@
 #
 # - configure_CA
 # - init_CA
-# - cleanup_CA
 
 # - configure_proxy
 # - start_tls_proxy
@@ -202,7 +201,6 @@
 # Create root and intermediate CAs
 # init_CA
 function init_CA {
-    fix_system_ca_bundle_path
     # Ensure CAs are built
     make_root_CA $ROOT_CA_DIR
     make_int_CA $INT_CA_DIR $ROOT_CA_DIR
@@ -221,26 +219,13 @@
     fi
 }
 
-# Clean up the CA files
-# cleanup_CA
-function cleanup_CA {
-    if is_fedora; then
-        sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
-        sudo update-ca-trust
-    elif is_ubuntu; then
-        sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt
-        sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt
-        sudo update-ca-certificates
-    fi
-}
-
 # Create an initial server cert
 # init_cert
 function init_cert {
     if [[ ! -r $DEVSTACK_CERT ]]; then
         if [[ -n "$TLS_IP" ]]; then
             # Lie to let incomplete match routines work
-            TLS_IP="DNS:$TLS_IP"
+            TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
         fi
         make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
 
@@ -263,6 +248,9 @@
         else
             alt_names="$alt_names,DNS:$SERVICE_HOST"
         fi
+        if is_ipv4_address "$SERVICE_HOST" ; then
+            alt_names="$alt_names,IP:$SERVICE_HOST"
+        fi
     fi
 
     # Only generate the certificate if it doesn't exist yet on the disk
@@ -336,15 +324,17 @@
     create_CA_base $ca_dir
     create_CA_config $ca_dir 'Root CA'
 
-    # Create a self-signed certificate valid for 5 years
-    $OPENSSL req -config $ca_dir/ca.conf \
-        -x509 \
-        -nodes \
-        -newkey rsa \
-        -days 21360 \
-        -keyout $ca_dir/private/cacert.key \
-        -out $ca_dir/cacert.pem \
-        -outform PEM
+    if [ ! -r "$ca_dir/cacert.pem" ]; then
+        # Create a self-signed certificate valid for 5 years
+        $OPENSSL req -config $ca_dir/ca.conf \
+            -x509 \
+            -nodes \
+            -newkey rsa \
+            -days 21360 \
+            -keyout $ca_dir/private/cacert.key \
+            -out $ca_dir/cacert.pem \
+            -outform PEM
+    fi
 }
 
 # If a non-system python-requests is installed then it will use the
@@ -452,30 +442,136 @@
 # Proxy Functions
 # ===============
 
+function tune_apache_connections {
+    local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf
+    if ! [ -f $tuning_file ] ; then
+        sudo bash -c "cat > $tuning_file" << EOF
+# worker MPM
+# StartServers: initial number of server processes to start
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a
+#              graceful restart. ThreadLimit can only be changed by stopping
+#              and starting Apache.
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxClients: maximum number of simultaneous client connections
+# MaxRequestsPerChild: maximum number of requests a server process serves
+#
+# The apache defaults are too conservative if we want reliable tempest
+# testing. Bump these values up from ~400 max clients to 1024 max clients.
+<IfModule mpm_worker_module>
+# Note that the next three conf values must be changed together.
+# MaxClients = ServerLimit * ThreadsPerChild
+ServerLimit          32
+ThreadsPerChild      32
+MaxClients         1024
+StartServers          3
+MinSpareThreads      96
+MaxSpareThreads     192
+ThreadLimit          64
+MaxRequestsPerChild   0
+</IfModule>
+<IfModule mpm_event_module>
+# Note that the next three conf values must be changed together.
+# MaxClients = ServerLimit * ThreadsPerChild
+ServerLimit          32
+ThreadsPerChild      32
+MaxClients         1024
+StartServers          3
+MinSpareThreads      96
+MaxSpareThreads     192
+ThreadLimit          64
+MaxRequestsPerChild   0
+</IfModule>
+EOF
+        restart_apache_server
+    fi
+}
+
 # Starts the TLS proxy for the given IP/ports
 # start_tls_proxy front-host front-port back-host back-port
 function start_tls_proxy {
-    local f_host=$1
-    local f_port=$2
-    local b_host=$3
-    local b_port=$4
+    local b_service="$1-tls-proxy"
+    local f_host=$2
+    local f_port=$3
+    local b_host=$4
+    local b_port=$5
 
-    stud $STUD_PROTO -f $f_host,$f_port -b $b_host,$b_port $DEVSTACK_CERT 2>/dev/null
+    tune_apache_connections
+
+    local config_file
+    config_file=$(apache_site_config_for $b_service)
+    local listen_string
+    # Default apache configs on ubuntu and centos listen on 80 and 443.
+    # Newer apache tolerates a duplicate Listen directive, but older
+    # apache does not, so special-case 80 and 443.
+    if [[ "$f_port" == "80" ]] || [[ "$f_port" == "443" ]]; then
+        listen_string=""
+    elif [[ "$f_host" == '*' ]] ; then
+        listen_string="Listen $f_port"
+    else
+        listen_string="Listen $f_host:$f_port"
+    fi
+    sudo bash -c "cat >$config_file" << EOF
+$listen_string
+
+<VirtualHost $f_host:$f_port>
+    SSLEngine On
+    SSLCertificateFile $DEVSTACK_CERT
+
+    <Location />
+        ProxyPass http://$b_host:$b_port/ retry=5 nocanon
+        ProxyPassReverse http://$b_host:$b_port/
+    </Location>
+    ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
+    ErrorLogFormat "[%{u}t] [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
+    LogLevel info
+    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common
+    LogFormat "%v %h %l %u %t \"%r\" %>s %b"
+</VirtualHost>
+EOF
+    for mod in ssl proxy proxy_http; do
+        enable_apache_mod $mod
+    done
+    enable_apache_site $b_service
+    # Only a reload is required to pull in new vhosts.
+    # Note that a full restart reliably fails on centos7 and trusty because
+    # apache can't open port 80 while the old apache process still has it
+    # open. Using reload fixes trusty, but centos7 still doesn't work.
+    reload_apache_server
 }
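
As a hedged example, a call such as "start_tls_proxy nova '*' 8774 127.0.0.1 18774" (ports and backend address illustrative) would write a vhost of roughly this shape, with the variables expanded at write time (shown unexpanded here for brevity and with the log-format directives omitted):

    Listen 8774

    <VirtualHost *:8774>
        SSLEngine On
        SSLCertificateFile $DEVSTACK_CERT
        <Location />
            ProxyPass http://127.0.0.1:18774/ retry=5 nocanon
            ProxyPassReverse http://127.0.0.1:18774/
        </Location>
        ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
        CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common
    </VirtualHost>
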
 
+# Follow TLS proxy
+function follow_tls_proxy {
+    sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log
+    tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log
+    sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log
+    tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log
+}
 
 # Cleanup Functions
 # =================
 
-# Stops all stud processes. This should be done only after all services
+# Stops the apache service. This should be done only after all services
 # using tls configuration are down.
 function stop_tls_proxy {
-    killall stud
+    stop_apache_server
 }
 
-# Remove CA along with configuration, as well as the local server certificate
+# Clean up the CA files
+# cleanup_CA
 function cleanup_CA {
-    rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT"
+    if is_fedora; then
+        sudo rm -f /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
+        sudo update-ca-trust
+    elif is_ubuntu; then
+        sudo rm -f /usr/local/share/ca-certificates/devstack-int.crt
+        sudo rm -f /usr/local/share/ca-certificates/devstack-root.crt
+        sudo update-ca-certificates
+    fi
+
+    rm -rf "$INT_CA_DIR" "$ROOT_CA_DIR" "$DEVSTACK_CERT"
 }
 
 # Tell emacs to use shell-script-mode
diff --git a/samples/local.sh b/samples/local.sh
index 634f6dd..9cd0bdc 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -36,7 +36,7 @@
     # Add first keypair found in localhost:$HOME/.ssh
     for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
         if [[ -r $i ]]; then
-            nova keypair-add --pub_key=$i `hostname`
+            openstack keypair create --public-key $i `hostname`
             break
         fi
     done
@@ -53,8 +53,8 @@
     MI_NAME=m1.micro
 
     # Create micro flavor if not present
-    if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
-        nova flavor-create $MI_NAME 6 128 0 1
+    if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then
+        openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1
     fi
 
 
@@ -62,7 +62,7 @@
     # ----------
 
     # Add tcp/22 and icmp to default security group
-    nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
-    nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+    openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22
+    openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp
 
 fi
diff --git a/stack.sh b/stack.sh
index 823b63b..74edb10 100755
--- a/stack.sh
+++ b/stack.sh
@@ -27,6 +27,13 @@
 # Make sure custom grep options don't get in the way
 unset GREP_OPTIONS
 
+# Sanitize language settings to avoid commands bailing out
+# with "unsupported locale setting" errors.
+unset LANG
+unset LANGUAGE
+LC_ALL=C
+export LC_ALL
+
 # Make sure umask is sane
 umask 022
 
@@ -185,7 +192,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f23|f24|rhel7|kvmibm1) ]]; then
+if [[ ! ${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|f25|rhel7|kvmibm1) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -562,6 +569,7 @@
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
@@ -656,7 +664,6 @@
 # Rabbit connection info
 # In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit
 # isn't enabled.
-RABBIT_USERID=${RABBIT_USERID:-stackrabbit}
 if is_service_enabled rabbit; then
     RABBIT_HOST=${RABBIT_HOST:-$SERVICE_HOST}
     read_password RABBIT_PASSWORD "ENTER A PASSWORD TO USE FOR RABBIT."
@@ -802,6 +809,13 @@
     install_os_brick
 fi
 
+# Setup TLS certs
+if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
+    configure_CA
+    init_CA
+    init_cert
+fi
+
 # Install middleware
 install_keystonemiddleware
 
@@ -852,6 +866,13 @@
     configure_nova
 fi
 
+if is_service_enabled placement; then
+    # placement api
+    stack_install_service placement
+    cleanup_placement
+    configure_placement
+fi
+
 if is_service_enabled horizon; then
     # django openstack_auth
     install_django_openstack_auth
@@ -867,14 +888,9 @@
 fi
 
 if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
-    configure_CA
-    init_CA
-    init_cert
-    # Add name to ``/etc/hosts``.
-    # Don't be naive and add to existing line!
+    fix_system_ca_bundle_path
 fi
 
-
 # Extras Install
 # --------------
 
@@ -979,6 +995,10 @@
     fi
     screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
     screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
+
+    if is_service_enabled tls-proxy; then
+        follow_tls_proxy
+    fi
 fi
 
 # Clear ``screenrc`` file
@@ -1007,21 +1027,12 @@
 # Keystone
 # --------
 
-if is_service_enabled keystone; then
-    echo_summary "Starting Keystone"
-
-    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
-        init_keystone
-        start_keystone
-        bootstrap_keystone
-    fi
-
-    # Rather than just export these, we write them out to a
-    # intermediate userrc file that can also be used to debug if
-    # something goes wrong between here and running
-    # tools/create_userrc.sh (this script relies on services other
-    # than keystone being available, so we can't call it right now)
-    cat > $TOP_DIR/userrc_early <<EOF
+# Rather than just export these, we write them out to an
+# intermediate userrc file that can also be used to debug if
+# something goes wrong between here and running
+# tools/create_userrc.sh (this script relies on services other
+# than keystone being available, so we can't call it right now)
+cat > $TOP_DIR/userrc_early <<EOF
 # Use this for debugging issues before files in accrc are created
 
 # Set up password auth credentials now that Keystone is bootstrapped
@@ -1036,11 +1047,21 @@
 
 EOF
 
-    if is_service_enabled tls-proxy; then
-        echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
-    fi
+if is_service_enabled tls-proxy; then
+    echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
+    start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
+fi
 
-    source $TOP_DIR/userrc_early
+source $TOP_DIR/userrc_early
+
+if is_service_enabled keystone; then
+    echo_summary "Starting Keystone"
+
+    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+        init_keystone
+        start_keystone
+        bootstrap_keystone
+    fi
 
     create_keystone_accounts
     create_nova_accounts
@@ -1153,6 +1174,11 @@
     init_nova_cells
 fi
 
+if is_service_enabled placement; then
+    echo_summary "Configuring placement"
+    init_placement
+fi
+
 
 # Extras Configuration
 # ====================
@@ -1201,11 +1227,6 @@
 
     echo_summary "Uploading images"
 
-    # Option to upload legacy ami-tty, which works with xenserver
-    if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-        IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-    fi
-
     for image_url in ${IMAGE_URLS//,/ }; do
         upload_image $image_url
     done
@@ -1258,6 +1279,10 @@
     start_nova
     create_flavors
 fi
+if is_service_enabled placement; then
+    echo_summary "Starting Placement"
+    start_placement
+fi
 if is_service_enabled cinder; then
     echo_summary "Starting Cinder"
     start_cinder
@@ -1356,6 +1381,14 @@
 check_libs_from_git
 
 
+# Configure nova cellsv2
+# ----------------------
+
+# Do this late because it requires compute hosts to have started
+if is_service_enabled n-api && [ "$NOVA_CONFIGURE_CELLSV2" == "True" ]; then
+    create_cell
+fi
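NOVA_CONFIGURE_CELLSV2 gates the late create_cell call added above. A minimal local.conf sketch to exercise that path (assuming n-api is enabled, as it is in the default service set):

    [[local|localrc]]
    NOVA_CONFIGURE_CELLSV2=True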
+
 # Bash completion
 # ===============
 
diff --git a/stackrc b/stackrc
index bfb897b..b5018de 100644
--- a/stackrc
+++ b/stackrc
@@ -7,13 +7,6 @@
 [[ -z "$_DEVSTACK_STACKRC" ]] || return 0
 declare -r _DEVSTACK_STACKRC=1
 
-# Sanitize language settings to avoid commands bailing out
-# with "unsupported locale setting" errors.
-unset LANG
-unset LANGUAGE
-LC_ALL=C
-export LC_ALL
-
 # Find the other rc files
 RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 
@@ -51,20 +44,10 @@
 # Specify which services to launch.  These generally correspond to
 # screen tabs. To change the default list, use the ``enable_service`` and
 # ``disable_service`` functions in ``local.conf``.
-# For example, to enable Swift add this to ``local.conf``:
-#  enable_service s-proxy s-object s-container s-account
-# In order to enable Neutron (a single node setup) add the following
+# For example, to enable Swift as part of DevStack, add the following
 # settings in ``local.conf``:
 #  [[local|localrc]]
-#  disable_service n-net
-#  enable_service q-svc
-#  enable_service q-agt
-#  enable_service q-dhcp
-#  enable_service q-l3
-#  enable_service q-meta
-#  # Optional, to enable tempest configuration as part of DevStack
-#  enable_service tempest
-
+#  enable_service s-proxy s-object s-container s-account
 # This allows us to pass ``ENABLED_SERVICES``
 if ! isset ENABLED_SERVICES ; then
     # Keystone - nothing works without keystone
@@ -266,10 +249,6 @@
 NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git}
 NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master}
 
-# neutron lbaas service
-NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git}
-NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master}
-
 # compute service
 NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git}
 NOVA_BRANCH=${NOVA_BRANCH:-master}
@@ -597,6 +576,12 @@
             LIBVIRT_GROUP=libvirtd
         fi
         ;;
+    lxd)
+        LXD_GROUP=${LXD_GROUP:-"lxd"}
+        ;;
+    docker)
+        DOCKER_GROUP=${DOCKER_GROUP:-"docker"}
+        ;;
     fake)
         NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
         ;;
@@ -773,7 +758,8 @@
 # Note that setting ``FIXED_RANGE`` may be necessary when running DevStack
 # in an OpenStack cloud that uses either of these address ranges internally.
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22}
+FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 HOST_IP_IFACE=${HOST_IP_IFACE:-}
 HOST_IP=${HOST_IP:-}
@@ -786,6 +772,9 @@
 
 HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6")
 
+# Whether or not the port_security extension should be enabled for Neutron.
+NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY)
+
 # SERVICE IP version
 # This is the IP version that services should be listening on, as well
 # as using to register their endpoints with keystone.
diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh
index 327fb56..92f9c01 100755
--- a/tests/test_meta_config.sh
+++ b/tests/test_meta_config.sh
@@ -125,6 +125,14 @@
 [[test10|does-not-exist-dir/test.conf]]
 foo=bar
 
+[[test11|test-same.conf]]
+[DEFAULT]
+foo=bar
+
+[[test11|test-same.conf]]
+[some]
+random=config
+
 [[test-multi-sections|test-multi-sections.conf]]
 [sec-1]
 cfg_item1 = abcd
@@ -147,6 +155,9 @@
 cfg_item2 = efgh
 cfg_item2 = \${FOO_BAR_BAZ}
 
+[[test11|test-same.conf]]
+[another]
+non = sense
 EOF
 
 echo -n "get_meta_section_files: test0 doesn't exist: "
@@ -385,8 +396,24 @@
 check_result "$VAL" "$EXPECT_VAL"
 set -e
 
+echo -n "merge_config_file test11 same section: "
+rm -f test-same.conf
+merge_config_group test.conf test11
+VAL=$(cat test-same.conf)
+EXPECT_VAL='
+[DEFAULT]
+foo = bar
+
+[some]
+random = config
+
+[another]
+non = sense'
+check_result "$VAL" "$EXPECT_VAL"
+
+
 rm -f test.conf test1c.conf test2a.conf \
     test-space.conf test-equals.conf test-strip.conf \
     test-colon.conf test-env.conf test-multiline.conf \
-    test-multi-sections.conf
+    test-multi-sections.conf test-same.conf
 rm -rf test-etc
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index b6db5d1..30d1a01 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -193,7 +193,6 @@
 export OS_AUTH_URL="$OS_AUTH_URL"
 export OS_CACERT="$OS_CACERT"
 export NOVA_CERT="$ACCOUNT_DIR/cacert.pem"
-export OS_AUTH_TYPE=v2password
 EOF
     if [ -n "$ADDPASS" ]; then
         echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index bbad1bf..56f12e7 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -46,6 +46,9 @@
 
 # Check if this project has a plugin file
 def has_devstack_plugin(proj):
+    # Don't link in the deb packaging repos
+    if "openstack/deb-" in proj:
+        return False
     r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
     return r.status_code == 200
 
diff --git a/tools/info.sh b/tools/info.sh
index c056fa7..282667f 100755
--- a/tools/info.sh
+++ b/tools/info.sh
@@ -8,7 +8,7 @@
 # Output types are git,localrc,os,pip,pkg:
 #
 #   git|<project>|<branch>[<shaq>]
-#   localtc|<var>=<value>
+#   localrc|<var>=<value>
 #   os|<var>=<value>
 #   pip|<package>|<version>
 #   pkg|<package>|<version>
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 1267699..a5ccb19 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -24,7 +24,20 @@
 
 FILES=$TOP_DIR/files
 
-PIP_GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
+# The URL from which the get-pip.py file is downloaded. If a local
+# get-pip.py mirror is available, PIP_GET_PIP_URL can be set to that
+# mirror in local.conf to avoid download timeouts.
+# Example:
+#  PIP_GET_PIP_URL="http://local-server/get-pip.py"
+#
+# Note that if get-pip.py already exists in $FILES, this script will
+# not re-download or check for a new version.  For example, this is
+# done by openstack-infra diskimage-builder elements as part of image
+# preparation [1].  This prevents any network access, which can be
+# unreliable in CI situations.
+# [1] http://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/cache-devstack/source-repository-pip
+
+PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"}
 LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
 
 GetDistro
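As the new comment explains, PIP_GET_PIP_URL can point at a local mirror from local.conf; a minimal sketch, reusing the hypothetical local-server host from the example above:

    [[local|localrc]]
    PIP_GET_PIP_URL="http://local-server/get-pip.py"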
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index 2628b40..e91464f 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -45,6 +45,7 @@
 
 # Make sure the CA is set up
 configure_CA
+fix_system_ca_bundle_path
 init_CA
 
 # Create the server cert
diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh
index dba7502..73fe3f3 100755
--- a/tools/ping_neutron.sh
+++ b/tools/ping_neutron.sh
@@ -54,7 +54,7 @@
 REMAINING_ARGS="${@:2}"
 
 # BUG: with duplicate network names, this fails pretty hard.
-NET_ID=$(neutron net-list | grep "$NET_NAME" | awk '{print $2}')
+NET_ID=$(openstack network show -f value -c id "$NET_NAME")
 PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1)
 
 # This runs a command inside the specific netns
diff --git a/tools/xen/functions b/tools/xen/functions
index cf14568..e1864eb 100644
--- a/tools/xen/functions
+++ b/tools/xen/functions
@@ -305,3 +305,25 @@
 
     xe vm-list name-label="$vm_name_label" params=dom-id minimal=true
 }
+
+function install_conntrack_tools {
+    local xs_host
+    local xs_ver_major
+    local centos_ver
+    local conntrack_conf
+    xs_host=$(xe host-list --minimal)
+    xs_ver_major=$(xe host-param-get uuid=$xs_host param-name=software-version param-key=product_version_text_short | cut -d'.' -f 1)
+    if [ $xs_ver_major -gt 6 ]; then
+        # Only support conntrack-tools in Dom0 with XS7.0 and above
+        if [ ! -f /usr/sbin/conntrackd ]; then
+            sed -i s/#baseurl=/baseurl=/g /etc/yum.repos.d/CentOS-Base.repo
+            centos_ver=$(yum version nogroups |grep Installed | cut -d' ' -f 2 | cut -d'.' -f1-2 | tr '-' '.')
+            yum install -y --enablerepo=base --releasever=$centos_ver conntrack-tools
+            # Back up conntrackd.conf after installing conntrack-tools; use the one with statistics mode
+            mv /etc/conntrackd/conntrackd.conf /etc/conntrackd/conntrackd.conf.back
+            conntrack_conf=$(find /usr/share/doc -name conntrackd.conf |grep stats)
+            cp $conntrack_conf /etc/conntrackd/conntrackd.conf
+        fi
+        service conntrackd restart
+    fi
+}
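install_conntrack_tools is only defined here; a minimal usage sketch for a hypothetical caller running in the XenServer Dom0, assuming a DevStack checkout as the working directory:

    # Sketch: load the Xen helper functions and install conntrack-tools
    . tools/xen/functions
    install_conntrack_tools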
diff --git a/unstack.sh b/unstack.sh
index ece69ac..c05d1f0 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -63,6 +63,7 @@
 source $TOP_DIR/lib/keystone
 source $TOP_DIR/lib/glance
 source $TOP_DIR/lib/nova
+source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/heat
@@ -96,11 +97,6 @@
 # Phase: unstack
 run_phase unstack
 
-if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
-    source $TOP_DIR/openrc
-    teardown_neutron_debug
-fi
-
 # Call service stop
 
 if is_service_enabled heat; then
@@ -111,6 +107,10 @@
     stop_nova
 fi
 
+if is_service_enabled placement; then
+    stop_placement
+fi
+
 if is_service_enabled glance; then
     stop_glance
 fi
@@ -184,11 +184,13 @@
     fi
 fi
 
-# BUG: maybe it doesn't exist? We should isolate this further down.
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
-# enabled backends. So if Cinder is enabled, we are sure lvm (lvremove,
-# /etc/lvm/lvm.conf, etc.) is here.
-if is_service_enabled cinder; then
+# enabled backends. So if Cinder is enabled and installed successfully, we are
+# sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.
+if is_service_enabled cinder && is_package_installed lvm2; then
+    # Using /bin/true here indicates a BUG - maybe the
+    # DEFAULT_VOLUME_GROUP_NAME doesn't exist?  We should
+    # isolate this further down in lib/cinder cleanup.
     clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
     clean_lvm_filter
 fi