Merge "Fix provider networking error message"
diff --git a/.gitignore b/.gitignore
index a470ff5..d1781bc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,6 +15,8 @@
 files/*.vmdk
 files/*.rpm
 files/*.rpm.*
+files/*.deb
+files/*.deb.*
 files/*.qcow2
 files/*.img
 files/images
diff --git a/README.md b/README.md
index 4ba4619..ff5598b 100644
--- a/README.md
+++ b/README.md
@@ -25,9 +25,9 @@
 The DevStack master branch generally points to trunk versions of OpenStack
 components.  For older, stable versions, look for branches named
 stable/[release] in the DevStack repo.  For example, you can do the
-following to create a juno OpenStack cloud:
+following to create a Newton OpenStack cloud:
 
-    git checkout stable/juno
+    git checkout stable/newton
     ./stack.sh
 
 You can also pick specific OpenStack project releases by setting the appropriate
diff --git a/clean.sh b/clean.sh
index bace3f5..e369eda 100755
--- a/clean.sh
+++ b/clean.sh
@@ -49,7 +49,6 @@
 source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/neutron-legacy
 
@@ -108,7 +107,7 @@
 fi
 
 # Clean out /etc
-sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/heat /etc/neutron /etc/openstack/
+sudo rm -rf /etc/keystone /etc/glance /etc/nova /etc/cinder /etc/swift /etc/neutron /etc/openstack/
 
 # Clean out tgt
 sudo rm -f /etc/tgt/conf.d/*
@@ -147,3 +146,8 @@
 done
 
 rm -rf ~/.config/openstack
+
+# Clean up all *.pyc files
+if [[ -n "$DEST" ]] && [[ -d "$DEST" ]]; then
+    sudo find $DEST -name "*.pyc" -print0 | xargs -0 rm
+fi
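If no ``.pyc`` files exist under ``$DEST``, ``xargs rm`` runs with no arguments and errors out; a slightly more defensive variant of the cleanup (a sketch, assuming GNU findutils) would be:

    # skip the rm entirely when find produces no matches
    sudo find $DEST -name "*.pyc" -print0 | xargs -0 --no-run-if-empty rm
    # or let find do the deletion itself
    sudo find $DEST -name "*.pyc" -delete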
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1161b34..53ae82f 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -63,7 +63,7 @@
 ::
 
     [[local|localrc]]
-    FIXED_RANGE=10.254.1.0/24
+    IPV4_ADDRS_SAFE_TO_USE=10.254.1.0/24
     ADMIN_PASSWORD=speciale
     LOGFILE=$DEST/logs/stack.sh.log
 
@@ -161,8 +161,8 @@
 
 -  no logging
 -  pre-set the passwords to prevent interactive prompts
--  move network ranges away from the local network (``FIXED_RANGE`` and
-   ``FLOATING_RANGE``, commented out below)
+-  move network ranges away from the local network (``IPV4_ADDRS_SAFE_TO_USE``
+   and ``FLOATING_RANGE``, commented out below)
 -  set the host IP if detection is unreliable (``HOST_IP``, commented
    out below)
 
@@ -173,7 +173,7 @@
     DATABASE_PASSWORD=$ADMIN_PASSWORD
     RABBIT_PASSWORD=$ADMIN_PASSWORD
     SERVICE_PASSWORD=$ADMIN_PASSWORD
-    #FIXED_RANGE=172.31.1.0/24
+    #IPV4_ADDRS_SAFE_TO_USE=172.31.1.0/24
     #FLOATING_RANGE=192.168.20.0/25
     #HOST_IP=10.3.4.5
 
@@ -521,16 +521,14 @@
 IP Version
 ----------
 
-``IP_VERSION`` can be used to configure DevStack to create either an
-IPv4, IPv6, or dual-stack self service project data-network by with
+``IP_VERSION`` can be used to configure Neutron to create either an
+IPv4, IPv6, or dual-stack self-service project data-network with
 either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
-respectively.  This functionality requires that the Neutron networking
-service is enabled by setting the following options:
+respectively.
 
     ::
 
-        disable_service n-net
-        enable_service q-svc q-agt q-dhcp q-l3
+        IP_VERSION=4+6
 
 The following optional variables can be used to alter the default IPv6
 behavior:
@@ -539,12 +537,12 @@
 
         IPV6_RA_MODE=slaac
         IPV6_ADDRESS_MODE=slaac
-        FIXED_RANGE_V6=fd$IPV6_GLOBAL_ID::/64
+        IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
         IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
 
-*Note*: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be
-configured with any valid IPv6 prefix. The default values make use of
-an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
+*Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
+can be configured with any valid IPv6 prefix. The default values make
+use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
 
 Service Version
 ~~~~~~~~~~~~~~~
@@ -705,13 +703,13 @@
 ~~~~~~
 
 The logical volume group used to hold the Cinder-managed volumes is
-set by ``VOLUME_GROUP``, the logical volume name prefix is set with
+set by ``VOLUME_GROUP_NAME``, the logical volume name prefix is set with
 ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
 with ``VOLUME_BACKING_FILE_SIZE``.
 
     ::
 
-        VOLUME_GROUP="stack-volumes"
+        VOLUME_GROUP_NAME="stack-volumes"
         VOLUME_NAME_PREFIX="volume-"
         VOLUME_BACKING_FILE_SIZE=10250M
 
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index 0c439ad..21bea99 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -66,21 +66,21 @@
     ./stack.sh
     . ./openrc
 
-    neutron net-list  # should show public and private networks
+    openstack network list  # should show public and private networks
 
 Create two nova instances that we can use as test http servers:
 
   ::
 
     #create nova instances on private network
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
     nova list # should show the nova instances just created
 
     #add secgroup rules to allow ssh etc..
-    neutron security-group-rule-create default --protocol icmp
-    neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22
-    neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80
+    openstack security group rule create default --protocol icmp
+    openstack security group rule create default --protocol tcp --dst-port 22:22
+    openstack security group rule create default --protocol tcp --dst-port 80:80
 
 Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
 
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index c996f95..dfc9936 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -260,7 +260,7 @@
     openstack user create $NAME --password=$PASSWORD --project $PROJECT
     openstack role add Member --user $NAME --project $PROJECT
     # The Member role is created by stack.sh
-    # openstack role list
+    # openstack role assignment list
 
 Swift
 -----
@@ -294,10 +294,10 @@
 
 ``stack-volumes`` can be pre-created on any physical volume supported by
 Linux's LVM. The name of the volume group can be changed by setting
-``VOLUME_GROUP`` in ``localrc``. ``stack.sh`` deletes all logical
-volumes in ``VOLUME_GROUP`` that begin with ``VOLUME_NAME_PREFIX`` as
+``VOLUME_GROUP_NAME`` in ``localrc``. ``stack.sh`` deletes all logical
+volumes in ``VOLUME_GROUP_NAME`` that begin with ``VOLUME_NAME_PREFIX`` as
 part of cleaning up from previous runs. It is recommended to not use the
-root volume group as ``VOLUME_GROUP``.
+root volume group as ``VOLUME_GROUP_NAME``.
 
 The details of creating the volume group depend on the server hardware
 involved but looks something like this:
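For a dedicated disk, that typically boils down to the usual LVM pair of commands (device name illustrative):

    sudo pvcreate /dev/sdb
    sudo vgcreate stack-volumes /dev/sdb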
@@ -400,6 +400,10 @@
 
         ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts
 
+3. Verify that login via ssh works without a password::
+
+        ssh -i /root/.ssh/id_rsa.pub stack@DESTINATION
+
 In essence, this means that every compute node's root user's public RSA key
 must exist in every other compute node's stack user's authorized_keys file and
 every compute node's public ECDSA key needs to be in every other compute
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index b26fd1e..092809a 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -79,7 +79,7 @@
         ## Neutron options
         Q_USE_SECGROUP=True
         FLOATING_RANGE="172.18.161.0/24"
-        FIXED_RANGE="10.0.0.0/24"
+        IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/22"
         Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
         PUBLIC_NETWORK_GATEWAY="172.18.161.1"
         PUBLIC_INTERFACE=eth0
@@ -387,16 +387,17 @@
 
         ## Neutron Networking options used to create Neutron Subnets
 
-        FIXED_RANGE="203.0.113.0/24"
+        IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
         NETWORK_GATEWAY=203.0.113.1
         PROVIDER_SUBNET_NAME="provider_net"
         PROVIDER_NETWORK_TYPE="vlan"
         SEGMENTATION_ID=2010
+        USE_SUBNETPOOL=False
 
-In this configuration we are defining FIXED_RANGE to be a
+In this configuration we are defining IPV4_ADDRS_SAFE_TO_USE to be a
 publicly routed IPv4 subnet. In this specific instance we are using
 the special TEST-NET-3 subnet defined in `RFC 5737 <http://tools.ietf.org/html/rfc5737>`_,
-which is used for documentation.  In your DevStack setup, FIXED_RANGE
+which is used for documentation.  In your DevStack setup, IPV4_ADDRS_SAFE_TO_USE
 would be a public IP address range that you or your organization has
 allocated to you, so that you could access your instances from the
 public internet.
@@ -523,7 +524,7 @@
     ## Neutron options
     Q_USE_SECGROUP=True
     FLOATING_RANGE="172.18.161.0/24"
-    FIXED_RANGE="10.0.0.0/24"
+    IPV4_ADDRS_SAFE_TO_USE="10.0.0.0/24"
     Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254
     PUBLIC_NETWORK_GATEWAY="172.18.161.1"
     PUBLIC_INTERFACE=eth0
@@ -572,11 +573,12 @@
     Q_AGENT=macvtap
     PHYSICAL_NETWORK=default
 
-    FIXED_RANGE="203.0.113.0/24"
+    IPV4_ADDRS_SAFE_TO_USE="203.0.113.0/24"
     NETWORK_GATEWAY=203.0.113.1
     PROVIDER_SUBNET_NAME="provider_net"
     PROVIDER_NETWORK_TYPE="vlan"
     SEGMENTATION_ID=2010
+    USE_SUBNETPOOL=False
 
     [[post-config|/$Q_PLUGIN_CONF_FILE]]
     [macvtap]
@@ -595,7 +597,7 @@
 
 For OVS, a similar configuration like described in the
 :ref:`OVS Provider Network <ovs-provider-network-controller>` section can be
-used. Just add the the following line to this local.conf, which also loads
+used. Just add the following line to this local.conf, which also loads
 the MacVTap mechanism driver:
 
 ::
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 435011b..b8dd506 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -45,31 +45,6 @@
 If you do not have a preference, Ubuntu 16.04 is the most tested, and
 will probably go the smoothest.
 
-Download DevStack
------------------
-
-::
-
-   git clone https://git.openstack.org/openstack-dev/devstack
-
-The ``devstack`` repo contains a script that installs OpenStack and
-templates for configuration files
-
-Create a local.conf
--------------------
-
-Create a ``local.conf`` file with 4 passwords preset
-
-::
-
-   [[local|localrc]]
-   ADMIN_PASSWORD=secret
-   DATABASE_PASSWORD=$ADMIN_PASSWORD
-   RABBIT_PASSWORD=$ADMIN_PASSWORD
-   SERVICE_PASSWORD=$ADMIN_PASSWORD
-
-This is the minimum required config to get started with DevStack.
-
 Add Stack User
 --------------
 
@@ -81,14 +56,48 @@
 
 ::
 
-   devstack/tools/create-stack-user.sh; su stack
+   $ adduser stack
+
+Since this user will be making many changes to your system, it should
+have sudo privileges:
+
+::
+
+    $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+    $ su stack
+
+Download DevStack
+-----------------
+
+::
+
+   $ git clone https://git.openstack.org/openstack-dev/devstack
+   $ cd devstack
+
+The ``devstack`` repo contains a script that installs OpenStack and
+templates for configuration files.
+
+Create a local.conf
+-------------------
+
+Create a ``local.conf`` file with 4 passwords preset at the root of the
+devstack git repo.
+
+::
+
+   [[local|localrc]]
+   ADMIN_PASSWORD=secret
+   DATABASE_PASSWORD=$ADMIN_PASSWORD
+   RABBIT_PASSWORD=$ADMIN_PASSWORD
+   SERVICE_PASSWORD=$ADMIN_PASSWORD
+
+This is the minimum required config to get started with DevStack.
 
 Start the install
 -----------------
 
 ::
 
-   cd devstack; ./stack.sh
+   ./stack.sh
 
 This will take 15 - 20 minutes, largely depending on the speed of
 your internet connection. Many git trees and packages will be
diff --git a/doc/source/networking.rst b/doc/source/networking.rst
index 1d56c33..bdbeaaa 100644
--- a/doc/source/networking.rst
+++ b/doc/source/networking.rst
@@ -4,7 +4,7 @@
 
 An important part of the DevStack experience is networking that works
 by default for created guests. This might not be optimal for your
-particular testing environment, so this document tries it's best to
+particular testing environment, so this document tries its best to
 explain what's going on.
 
 Defaults
@@ -15,10 +15,11 @@
 * neutron (including l3 with openvswitch)
 * private project networks for each openstack project
 * a floating ip range of 172.24.4.0/24 with the gateway of 172.24.4.1
-* the demo project configured with fixed ips on 10.0.0.0/24
-* a ``br-ex`` interface controlled by neutron for all it's networking
+* the demo project configured with fixed ips on a subnet allocated from
+  the 10.0.0.0/22 range
+* a ``br-ex`` interface controlled by neutron for all its networking
   (this is not connected to any physical interfaces).
-* DNS resolution for guests based on the resolv.conf for you host
+* DNS resolution for guests based on the resolv.conf for your host
 * an ip masq rule that allows created guests to route out
 
 This creates an environment which is isolated to the single
@@ -39,7 +40,7 @@
 Locally Accessible Guests
 =========================
 
-If you want to make you guests accessible other machines on your
+If you want to make your guests accessible from other machines on your
 network, we have to connect ``br-ex`` to a physical interface.
 
 Dedicated Guest Interface
@@ -95,3 +96,21 @@
 your existing network, you'll want to give it a slice that your local
 dhcp server is not allocating. Otherwise you could easily have
 conflicting ip addresses, and cause havoc with your local network.
+
+
+Private Network Addressing
+==========================
+
+The private network addresses are controlled by the ``IPV4_ADDRS_SAFE_TO_USE``
+and ``IPV6_ADDRS_SAFE_TO_USE`` variables. Each gives users a single place to
+specify a range of safe internal addresses, and that range is referenced
+whether or not subnetpools are in use.
+
+For IPv4, ``FIXED_RANGE`` and ``SUBNETPOOL_PREFIX_V4`` default directly to the
+value of ``IPV4_ADDRS_SAFE_TO_USE``.
+
+For IPv6, ``FIXED_RANGE_V6`` defaults to the first /64 of
+``IPV6_ADDRS_SAFE_TO_USE``; if ``IPV6_ADDRS_SAFE_TO_USE`` is /64 or smaller,
+``FIXED_RANGE_V6`` uses that value directly. ``SUBNETPOOL_PREFIX_V6`` defaults
+directly to the value of ``IPV6_ADDRS_SAFE_TO_USE``.
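In shell terms, the defaulting described above amounts to roughly the following (a sketch; the real logic also carves the /64 out of a larger IPv6 prefix):

    # IPv4: both ranges fall back to the single safe-to-use variable
    FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
    SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}

    # IPv6: the subnetpool gets the whole prefix; FIXED_RANGE_V6 gets a /64 of it
    SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}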
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 9d023bf..cb9c437 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -58,7 +58,9 @@
 freezer                                `git://git.openstack.org/openstack/freezer <https://git.openstack.org/cgit/openstack/freezer>`__
 freezer-api                            `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
 freezer-web-ui                         `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
+fuxi                                   `git://git.openstack.org/openstack/fuxi <https://git.openstack.org/cgit/openstack/fuxi>`__
 gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
+glare                                  `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
 gnocchi                                `git://git.openstack.org/openstack/gnocchi <https://git.openstack.org/cgit/openstack/gnocchi>`__
 group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
 heat                                   `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
@@ -68,13 +70,18 @@
 ironic-staging-drivers                 `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
 karbor                                 `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
 karbor-dashboard                       `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
+keystone                               `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
 kingbird                               `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
+kuryr-kubernetes                       `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
 kuryr-libnetwork                       `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
 magnum                                 `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
 magnum-ui                              `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
 manila                                 `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
+manila-ui                              `git://git.openstack.org/openstack/manila-ui <https://git.openstack.org/cgit/openstack/manila-ui>`__
 masakari                               `git://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
+meteos                                 `git://git.openstack.org/openstack/meteos <https://git.openstack.org/cgit/openstack/meteos>`__
 mistral                                `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
+mixmatch                               `git://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
 monasca-analytics                      `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
 monasca-api                            `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
 monasca-ceilometer                     `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
@@ -82,11 +89,14 @@
 monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
 murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
 networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
+networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
 networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
 networking-bgpvpn                      `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
 networking-brocade                     `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
 networking-calico                      `git://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
 networking-cisco                       `git://git.openstack.org/openstack/networking-cisco <https://git.openstack.org/cgit/openstack/networking-cisco>`__
+networking-cumulus                     `git://git.openstack.org/openstack/networking-cumulus <https://git.openstack.org/cgit/openstack/networking-cumulus>`__
+networking-dpm                         `git://git.openstack.org/openstack/networking-dpm <https://git.openstack.org/cgit/openstack/networking-dpm>`__
 networking-fortinet                    `git://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
 networking-generic-switch              `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
 networking-huawei                      `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
@@ -111,14 +121,17 @@
 neutron-lbaas                          `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
 neutron-lbaas-dashboard                `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
 neutron-vpnaas                         `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
+nimble                                 `git://git.openstack.org/openstack/nimble <https://git.openstack.org/cgit/openstack/nimble>`__
 nova-docker                            `git://git.openstack.org/openstack/nova-docker <https://git.openstack.org/cgit/openstack/nova-docker>`__
+nova-dpm                               `git://git.openstack.org/openstack/nova-dpm <https://git.openstack.org/cgit/openstack/nova-dpm>`__
 nova-lxd                               `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
 nova-mksproxy                          `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
 nova-powervm                           `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
+oaktree                                `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
 octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
 osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
 panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
-python-freezerclient                   `git://git.openstack.org/openstack/python-freezerclient <https://git.openstack.org/cgit/openstack/python-freezerclient>`__
+picasso                                `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
 rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
 sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
 sahara-dashboard                       `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 31987bc..5b3c6cf 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -99,7 +99,7 @@
       should exist at this point.
    -  **extra** - Called near the end after layer 1 and 2 services have
       been started.
-   - **test-config** - Called at the end of devstack used to configure tempest
+   -  **test-config** - Called at the end of devstack used to configure tempest
       or any other test environments
 
 -  **unstack** - Called by ``unstack.sh`` before other services are shut
diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh
index dc6bbbb..e8c8f62 100755
--- a/exercises/neutron-adv-test.sh
+++ b/exercises/neutron-adv-test.sh
@@ -148,7 +148,7 @@
 function get_role_id {
     local ROLE_NAME=$1
     local ROLE_ID
-    ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'`
+    ROLE_ID=`openstack role assignment list | grep $ROLE_NAME | awk '{print $2}'`
     die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME"
     echo "$ROLE_ID"
 }
@@ -156,7 +156,7 @@
 function get_network_id {
     local NETWORK_NAME="$1"
     local NETWORK_ID
-    NETWORK_ID=`neutron net-list -F id  -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'`
+    NETWORK_ID=`openstack network show -f value -c id $NETWORK_NAME`
     echo $NETWORK_ID
 }
 
@@ -234,9 +234,9 @@
     PROJECT_ID=$(get_project_id $PROJECT)
     source $TOP_DIR/openrc $PROJECT $PROJECT
     local NET_ID
-    NET_ID=$(neutron net-create --project-id $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
+    NET_ID=$(openstack network create --project $PROJECT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' )
     die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PROJECT_ID $NET_NAME $EXTRA"
-    neutron subnet-create --ip-version 4 --project-id $PROJECT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR
+    openstack subnet create --ip-version 4 --project $PROJECT_ID --gateway $GATEWAY --subnet-pool None --network $NET_ID --subnet-range $CIDR "${NET_NAME}_subnet"
     neutron_debug_admin probe-create --device-owner compute $NET_ID
     source $TOP_DIR/openrc demo demo
 }
@@ -325,10 +325,10 @@
     PROJECT_ID=$(get_project_id $PROJECT)
     #TODO(nati) comment out until l3-agent merged
     #for res in port subnet net router;do
-    for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do
+    for net_id in `openstack network list -c ID -c Name | grep $NET_NAME | awk '{print $2}'`;do
         delete_probe $net_id
-        neutron subnet-list | grep $net_id | awk '{print $2}' | xargs -I% neutron subnet-delete %
-        neutron net-delete $net_id
+        openstack subnet list | grep $net_id | awk '{print $2}' | xargs -I% openstack subnet delete %
+        openstack network delete $net_id
     done
     source $TOP_DIR/openrc demo demo
 }
diff --git a/extras.d/80-tempest.sh b/extras.d/80-tempest.sh
index 6a3d121..15ecfe3 100644
--- a/extras.d/80-tempest.sh
+++ b/extras.d/80-tempest.sh
@@ -11,13 +11,16 @@
         # Tempest config must come after layer 2 services are running
         :
     elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
+        # Tempest config must come after all other plugins are run
+        :
+    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
+        # local.conf Tempest option overrides
+        :
+    elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then
         echo_summary "Initializing Tempest"
         configure_tempest
         echo_summary "Installing Tempest Plugins"
         install_tempest_plugins
-    elif [[ "$1" == "stack" && "$2" == "post-extra" ]]; then
-        # local.conf Tempest option overrides
-        :
     fi
 
     if [[ "$1" == "unstack" ]]; then
diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template
deleted file mode 100644
index ab33c66..0000000
--- a/files/apache-heat-api-cfn.template
+++ /dev/null
@@ -1,27 +0,0 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup heat-api-cfn
-    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    AllowEncodedSlashes On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log
-    %SSLENGINE%
-    %SSLCERTFILE%
-    %SSLKEYFILE%
-
-    <Directory %HEAT_BIN_DIR%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
-</VirtualHost>
diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template
deleted file mode 100644
index 06c91bb..0000000
--- a/files/apache-heat-api-cloudwatch.template
+++ /dev/null
@@ -1,27 +0,0 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup heat-api-cloudwatch
-    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    AllowEncodedSlashes On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log
-    %SSLENGINE%
-    %SSLCERTFILE%
-    %SSLKEYFILE%
-
-    <Directory %HEAT_BIN_DIR%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
-</VirtualHost>
diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template
deleted file mode 100644
index 4924b39..0000000
--- a/files/apache-heat-api.template
+++ /dev/null
@@ -1,27 +0,0 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
-    WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
-    WSGIProcessGroup heat-api
-    WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api
-    WSGIApplicationGroup %{GLOBAL}
-    WSGIPassAuthorization On
-    AllowEncodedSlashes On
-    <IfVersion >= 2.4>
-      ErrorLogFormat "%{cu}t %M"
-    </IfVersion>
-    ErrorLog /var/log/%APACHE_NAME%/heat-api.log
-    %SSLENGINE%
-    %SSLCERTFILE%
-    %SSLKEYFILE%
-
-    <Directory %HEAT_BIN_DIR%>
-        <IfVersion >= 2.4>
-            Require all granted
-        </IfVersion>
-        <IfVersion < 2.4>
-            Order allow,deny
-            Allow from all
-        </IfVersion>
-    </Directory>
-</VirtualHost>
diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template
deleted file mode 100644
index d88ac3e..0000000
--- a/files/apache-heat-pip-repo.template
+++ /dev/null
@@ -1,15 +0,0 @@
-Listen %HEAT_PIP_REPO_PORT%
-
-<VirtualHost *:%HEAT_PIP_REPO_PORT%>
-    DocumentRoot %HEAT_PIP_REPO%
-    <Directory %HEAT_PIP_REPO%>
-        DirectoryIndex index.html
-        Require all granted
-        Order allow,deny
-        allow from all
-    </Directory>
-
-    ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log
-    LogLevel warn
-    CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined
-</VirtualHost>
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 428544f..84dc273 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -34,6 +34,12 @@
     %SSLKEYFILE%
 </VirtualHost>
 
+%SSLLISTEN%<VirtualHost *:443>
+%SSLLISTEN%    %SSLENGINE%
+%SSLLISTEN%    %SSLCERTFILE%
+%SSLLISTEN%    %SSLKEYFILE%
+%SSLLISTEN%</VirtualHost>
+
 Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public
 <Location /identity>
     SetHandler wsgi-script
diff --git a/files/apache-placement-api.template b/files/apache-placement-api.template
index b89ef96..011abb9 100644
--- a/files/apache-placement-api.template
+++ b/files/apache-placement-api.template
@@ -1,6 +1,8 @@
-Listen %PUBLICPORT%
-
-<VirtualHost *:%PUBLICPORT%>
+# NOTE(sbauza): This virtualhost is only here because some directives can
+# only be set in a virtualhost or server context, which is why the port is not bound.
+# TODO(sbauza): Find a better way to identify a free port that does not
+# correspond to an existing vhost.
+<VirtualHost *:8780>
     WSGIDaemonProcess placement-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV%
     WSGIProcessGroup placement-api
     WSGIScriptAlias / %PUBLICWSGI%
diff --git a/files/debs/general b/files/debs/general
index a1f2a4b..c121770 100644
--- a/files/debs/general
+++ b/files/debs/general
@@ -2,6 +2,7 @@
 bridge-utils
 bsdmainutils
 curl
+default-jre-headless  # NOPRIME
 g++
 gcc
 gettext  # used for compiling message catalogs
@@ -17,7 +18,6 @@
 libxslt1-dev  # lxml
 libyaml-dev
 lsof # useful when debugging
-openjdk-7-jre-headless  # NOPRIME
 openssh-server
 openssl
 pkg-config
diff --git a/files/debs/heat b/files/debs/heat
deleted file mode 100644
index 1ecbc78..0000000
--- a/files/debs/heat
+++ /dev/null
@@ -1 +0,0 @@
-gettext # dist:trusty
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
index 3b19071..1044c25 100644
--- a/files/rpms-suse/general
+++ b/files/rpms-suse/general
@@ -21,6 +21,7 @@
 psmisc
 python-cmd2 # dist:opensuse-12.3
 python-devel  # pyOpenSSL
+python-xml
 screen
 tar
 tcpdump
diff --git a/files/rpms/general b/files/rpms/general
index d0ceb56..77d2fa5 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -7,9 +7,9 @@
 gettext  # used for compiling message catalogs
 git-core
 graphviz # needed only for docs
-iptables-services  # NOPRIME f23,f24
+iptables-services  # NOPRIME f23,f24,f25
 java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f23,f24
+java-1.8.0-openjdk-headless  # NOPRIME f23,f24,f25
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
diff --git a/files/rpms/nova b/files/rpms/nova
index a883ec4..45f1c94 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -7,7 +7,7 @@
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f23,f24
+kernel-modules # dist:f23,f24,f25
 kpartx
 kvm # NOPRIME
 libvirt-bin # NOPRIME
diff --git a/files/rpms/swift b/files/rpms/swift
index bd249ee..2f12df0 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -2,7 +2,7 @@
 liberasurecode-devel
 memcached
 pyxattr
-rsync-daemon # dist:f23,f24
+rsync-daemon # dist:f23,f24,f25
 sqlite
 xfsprogs
 xinetd
diff --git a/files/swift/rsyncd.conf b/files/swift/rsyncd.conf
index c670531..c49f716 100644
--- a/files/swift/rsyncd.conf
+++ b/files/swift/rsyncd.conf
@@ -4,76 +4,76 @@
 pid file = %SWIFT_DATA_DIR%/run/rsyncd.pid
 address = 127.0.0.1
 
-[account6012]
+[account6612]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6012.lock
+lock file = %SWIFT_DATA_DIR%/run/account6612.lock
 
-[account6022]
+[account6622]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6022.lock
+lock file = %SWIFT_DATA_DIR%/run/account6622.lock
 
-[account6032]
+[account6632]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6032.lock
+lock file = %SWIFT_DATA_DIR%/run/account6632.lock
 
-[account6042]
+[account6642]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/account6042.lock
+lock file = %SWIFT_DATA_DIR%/run/account6642.lock
 
 
-[container6011]
+[container6611]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6011.lock
+lock file = %SWIFT_DATA_DIR%/run/container6611.lock
 
-[container6021]
+[container6621]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6021.lock
+lock file = %SWIFT_DATA_DIR%/run/container6621.lock
 
-[container6031]
+[container6631]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6031.lock
+lock file = %SWIFT_DATA_DIR%/run/container6631.lock
 
-[container6041]
+[container6641]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/container6041.lock
+lock file = %SWIFT_DATA_DIR%/run/container6641.lock
 
 
-[object6010]
+[object6613]
 max connections = 25
 path = %SWIFT_DATA_DIR%/1/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6010.lock
+lock file = %SWIFT_DATA_DIR%/run/object6613.lock
 
-[object6020]
+[object6623]
 max connections = 25
 path = %SWIFT_DATA_DIR%/2/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6020.lock
+lock file = %SWIFT_DATA_DIR%/run/object6623.lock
 
-[object6030]
+[object6633]
 max connections = 25
 path = %SWIFT_DATA_DIR%/3/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6030.lock
+lock file = %SWIFT_DATA_DIR%/run/object6633.lock
 
-[object6040]
+[object6643]
 max connections = 25
 path = %SWIFT_DATA_DIR%/4/node/
 read only = false
-lock file = %SWIFT_DATA_DIR%/run/object6040.lock
+lock file = %SWIFT_DATA_DIR%/run/object6643.lock
diff --git a/functions b/functions
index 5856578..6a0ac67 100644
--- a/functions
+++ b/functions
@@ -646,6 +646,24 @@
 }
 
 
+# enable_kernel_bridge_firewall - Enable kernel support for bridge firewalling
+function enable_kernel_bridge_firewall {
+    # Load bridge module. This module provides access to firewall for bridged
+    # frames; and also on older kernels (pre-3.18) it provides sysctl knobs to
+    # enable/disable bridge firewalling
+    sudo modprobe bridge
+    # For newer kernels (3.18+), those sysctl settings are split into a separate
+    # kernel module (br_netfilter). Load it too, if present.
+    sudo modprobe br_netfilter 2>> /dev/null || :
+    # Enable bridge firewalling in case it's disabled in kernel (upstream
+    # default is enabled, but some distributions may decide to change it).
+    # This is at least needed for RHEL 7.2 and earlier releases.
+    for proto in arp ip ip6; do
+        sudo sysctl -w net.bridge.bridge-nf-call-${proto}tables=1
+    done
+}
+
+
 # Restore xtrace
 $_XTRACE_FUNCTIONS
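A quick way to confirm the new helper took effect is to read the same sysctl keys back after calling it (sketch):

    enable_kernel_bridge_firewall
    for proto in arp ip ip6; do
        sysctl net.bridge.bridge-nf-call-${proto}tables   # each should report "= 1"
    done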
 
diff --git a/functions-common b/functions-common
index 4716567..8d03b88 100644
--- a/functions-common
+++ b/functions-common
@@ -216,7 +216,7 @@
 function deprecated {
     local text=$1
     DEPRECATED_TEXT+="\n$text"
-    echo "WARNING: $text"
+    echo "WARNING: $text" >&2
 }
 
 # Prints line number and "message" in error format
@@ -534,10 +534,8 @@
                 echo "the project to the \$PROJECTS variable in the job definition."
                 die $LINENO "Cloning not allowed in this configuration"
             fi
-            git_timed clone $git_clone_flags $git_remote $git_dest
-            cd $git_dest
-            # This checkout syntax works for both branches and tags
-            git checkout $git_ref
+            # '--branch' can also take tags
+            git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
         elif [[ "$RECLONE" = "True" ]]; then
             # if it does exist then simulate what clone does if asked to RECLONE
             cd $git_dest
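The single-command form relies on ``git clone --branch`` accepting a tag as well as a branch name, e.g. (values illustrative):

    # equivalent to the old clone + checkout sequence
    git clone $git_clone_flags $git_remote $git_dest --branch stable/newton
    git clone $git_clone_flags $git_remote $git_dest --branch 1.0.0   # a tag works too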
@@ -865,11 +863,9 @@
     domain_args=$(_get_domain_args $4 $5)
 
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
-        --column "ID" \
         --project $3 \
-        --column "Name" \
         $domain_args \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
@@ -878,11 +874,9 @@
             --user $2 \
             --project $3 \
             $domain_args
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
-            --column "ID" \
             --project $3 \
-            --column "Name" \
             $domain_args \
             | grep " $1 " | get_field 1)
     fi
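``get_field`` is a DevStack helper that extracts one column from the ASCII-table output, so the lookup above boils down to something like (role, user and project names illustrative):

    # first column of the matching row
    openstack role assignment list --user demo --project demo | grep " Member " | get_field 1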
@@ -894,22 +888,18 @@
 function get_or_add_user_domain_role {
     local user_role_id
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
-        --column "ID" \
         --domain $3 \
-        --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
         # Adds role to user and get it
         openstack role add $1 \
             --user $2 \
             --domain $3
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
-            --column "ID" \
             --domain $3 \
-            --column "Name" \
             | grep " $1 " | get_field 1)
     fi
     echo $user_role_id
@@ -920,13 +910,11 @@
 function get_or_add_user_domain_role {
     local user_role_id
     # Gets user role id
-    user_role_id=$(openstack role list \
+    user_role_id=$(openstack role assignment list \
         --user $2 \
         --os-url=$KEYSTONE_SERVICE_URI_V3 \
         --os-identity-api-version=3 \
-        --column "ID" \
         --domain $3 \
-        --column "Name" \
         | grep " $1 " | get_field 1)
     if [[ -z "$user_role_id" ]]; then
         # Adds role to user and get it
@@ -935,13 +923,11 @@
             --domain $3 \
             --os-url=$KEYSTONE_SERVICE_URI_V3 \
             --os-identity-api-version=3
-        user_role_id=$(openstack role list \
+        user_role_id=$(openstack role assignment list \
             --user $2 \
             --os-url=$KEYSTONE_SERVICE_URI_V3 \
             --os-identity-api-version=3 \
-            --column "ID" \
             --domain $3 \
-            --column "Name" \
             | grep " $1 " | get_field 1)
     fi
     echo $user_role_id
@@ -952,19 +938,19 @@
 function get_or_add_group_project_role {
     local group_role_id
     # Gets group role id
-    group_role_id=$(openstack role list \
+    group_role_id=$(openstack role assignment list \
         --group $2 \
         --project $3 \
-        -c "ID" -f value)
+        -f value)
     if [[ -z "$group_role_id" ]]; then
         # Adds role to group and get it
         openstack role add $1 \
             --group $2 \
             --project $3
-        group_role_id=$(openstack role list \
+        group_role_id=$(openstack role assignment list \
             --group $2 \
             --project $3 \
-            -c "ID" -f value)
+            -f value)
     fi
     echo $group_role_id
 }
@@ -1330,7 +1316,7 @@
     elif is_fedora; then
         sudo ${YUM:-yum} remove -y "$@" ||:
     elif is_suse; then
-        sudo zypper rm "$@" ||:
+        sudo zypper remove -y "$@" ||:
     else
         exit_distro_not_supported "uninstalling packages"
     fi
@@ -1346,20 +1332,26 @@
 
     time_start "yum_install"
 
-    # - We run with LC_ALL=C so string matching *should* be OK
-    # - Exit 1 if the failure might get better with a retry.
-    # - Exit 2 if it is fatal.
-    parse_yum_result='             \
-        BEGIN { result=0 }         \
-        /^YUM_FAILED/ { exit $2 }  \
-        /^No package/ { result=2 } \
-        /^Failed:/    { result=2 } \
-        //{ print }                \
+    # This is a bit tricky, because yum -y assumes missing or failed
+    # packages are OK (see [1]).  We want devstack to stop if we are
+    # installing missing packages.
+    #
+    # Thus we manually match on the output (stack.sh runs in a fixed
+    # locale, so lang shouldn't change).
+    #
+    # If yum returns !0, we echo the result as "YUM_FAILED" and return
+    # that from the awk (we're subverting -e with this trick).
+    # Otherwise we use awk to look for failure strings and return "2"
+    # to indicate a terminal failure.
+    #
+    # [1] https://bugzilla.redhat.com/show_bug.cgi?id=965567
+    parse_yum_result='              \
+        BEGIN { result=0 }          \
+        /^YUM_FAILED/ { result=$2 } \
+        /^No package/ { result=2 }  \
+        /^Failed:/    { result=2 }  \
+        //{ print }                 \
         END { exit result }'
-
-    # The manual check for missing packages is because yum -y assumes
-    # missing or failed packages are OK.
-    # See https://bugzilla.redhat.com/show_bug.cgi?id=965567
     (sudo_with_proxies "${YUM:-yum}" install -y "$@" 2>&1 || echo YUM_FAILED $?) \
         | awk "$parse_yum_result" && result=$? || result=$?
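The awk program converts failures back into an exit status: a non-zero yum exit is echoed as ``YUM_FAILED <rc>`` and becomes the result, while "No package"/"Failed:" lines force 2. A tiny standalone dry-run of the same idea:

    parse='BEGIN {result=0} /^YUM_FAILED/ {result=$2} /^No package/ {result=2} //{print} END {exit result}'
    (false || echo YUM_FAILED 1) | awk "$parse"; echo "rc=$?"   # prints "YUM_FAILED 1" then "rc=1"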
 
@@ -1686,7 +1678,7 @@
     local logfile=$2
 
     if [[ "$USE_SCREEN" = "True" ]]; then
-        screen_process "$name" "sudo tail -f $logfile | sed 's/\\\\\\\\x1b/\o033/g'"
+        screen_process "$name" "sudo tail -f $logfile | sed -u 's/\\\\\\\\x1b/\o033/g'"
     fi
 }
 
@@ -1779,6 +1771,9 @@
     local name=$1
     local url=$2
     local branch=${3:-master}
+    if [[ ",${DEVSTACK_PLUGINS}," =~ ,${name}, ]]; then
+        die $LINENO "Plugin attempted to be enabled twice: ${name} ${url} ${branch}"
+    fi
     DEVSTACK_PLUGINS+=",$name"
     GITREPO[$name]=$url
     GITDIR[$name]=$DEST/$name
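In ``local.conf`` terms, the new guard means a configuration that enables the same plugin twice, such as (illustrative):

    [[local|localrc]]
    enable_plugin heat https://git.openstack.org/openstack/heat
    enable_plugin heat https://git.openstack.org/openstack/heat stable/newton

now dies with "Plugin attempted to be enabled twice" instead of silently re-registering the plugin with the second URL and branch.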
@@ -2207,6 +2202,18 @@
     echo ${1-0}.${2-0}.${3-0}.${4-0}
 }
 
+# Check if this is a valid ipv4 address string
+function is_ipv4_address {
+    local address=$1
+    local regex='([0-9]{1,3}.){3}[0-9]{1,3}'
+    # TODO(clarkb) make this more robust
+    if [[ "$address" =~ $regex ]] ; then
+        return 0
+    else
+        return 1
+    fi
+}
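As the TODO notes, the pattern above is loose: the dot is unescaped and nothing is anchored, so a string like ``1.2.3.4junk`` still matches. A slightly stricter sketch (still without checking the 0-255 octet range) would be:

    function is_ipv4_address_strict {
        local address=$1
        # anchored, with the dots escaped; octet range is still not validated
        local regex='^([0-9]{1,3}\.){3}[0-9]{1,3}$'
        [[ "$address" =~ $regex ]]
    }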
+
 # Gracefully cp only if source file/dir exists
 # cp_it source destination
 function cp_it {
@@ -2254,6 +2261,14 @@
     echo $subnet
 }
 
+function is_provider_network {
+    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
+        return 0
+    fi
+    return 1
+}
+
+
 # Return the current python as "python<major>.<minor>"
 function python_version {
     local python_version
@@ -2304,11 +2319,12 @@
     fi
 }
 
-# Service wrapper to stop services
+# Service wrapper to reload services
+# If the service was not in running state it will start it
 # reload_service service-name
 function reload_service {
     if [ -x /bin/systemctl ]; then
-        sudo /bin/systemctl reload $1
+        sudo /bin/systemctl reload-or-restart $1
     else
         sudo service $1 reload
     fi
diff --git a/inc/meta-config b/inc/meta-config
index 6eb7a00..6252135 100644
--- a/inc/meta-config
+++ b/inc/meta-config
@@ -40,12 +40,10 @@
     $CONFIG_AWK_CMD -v matchgroup=$matchgroup -v configfile=$configfile '
         BEGIN { group = "" }
         /^\[\[.+\|.*\]\]/ {
-            if (group == "") {
-                gsub("[][]", "", $1);
-                split($1, a, "|");
-                if (a[1] == matchgroup && a[2] == configfile) {
-                    group=a[1]
-                }
+            gsub("[][]", "", $1);
+            split($1, a, "|");
+            if (a[1] == matchgroup && a[2] == configfile) {
+                group=a[1]
             } else {
                 group=""
             }
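The practical effect of dropping the ``group == ""`` check is that every matching ``[[group|file]]`` header now selects its section, so repeated sections for the same file are all extracted and merged. For example, with a ``local.conf`` like (settings illustrative):

    [[post-config|$NOVA_CONF]]
    [DEFAULT]
    debug = True

    [[post-config|$NOVA_CONF]]
    [libvirt]
    virt_type = qemu

both blocks end up applied to ``$NOVA_CONF``.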
diff --git a/inc/python b/inc/python
index e4cfab8..5a9a9ed 100644
--- a/inc/python
+++ b/inc/python
@@ -76,6 +76,27 @@
         | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' '
 }
 
+# Check for python3 classifier in local directory
+function check_python3_support_for_package_local {
+    local name=$1
+    cd $name
+    set +e
+    classifier=$(python setup.py --classifiers \
+        | grep 'Programming Language :: Python :: 3$')
+    set -e
+    echo $classifier
+}
+
+# Check for python3 classifier on pypi
+function check_python3_support_for_package_remote {
+    local name=$1
+    set +e
+    classifier=$(curl -s -L "https://pypi.python.org/pypi/$name/json" \
+        | grep '"Programming Language :: Python :: 3"')
+    set -e
+    echo $classifier
+}
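Usage sketch for the two new helpers (package names purely illustrative); each prints a non-empty classifier string only when the package advertises Python 3 support:

    # local checkout: inspects `python setup.py --classifiers` in ./nova
    check_python3_support_for_package_local nova

    # nothing on disk: queries the PyPI JSON metadata instead
    check_python3_support_for_package_remote oslo.config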
+
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
 # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
@@ -123,9 +144,41 @@
                 # default pip
                 local package_dir=${!#}
                 local python_versions
-                if [[ -d "$package_dir" ]]; then
+
+                # Special case some services that have experimental
+                # support for python3 in progress, but don't claim support
+                # in their classifier
+                echo "Check python version for : $package_dir"
+                if [[ ${package_dir##*/} == "nova" || ${package_dir##*/} == "glance" || \
+                        ${package_dir##*/} == "cinder" || ${package_dir##*/} == "swift" || \
+                        ${package_dir##*/} == "uwsgi" ]]; then
+                    echo "Using $PYTHON3_VERSION version to install $package_dir"
+                    sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
+                    cmd_pip=$(get_pip_command $PYTHON3_VERSION)
+                elif [[ -d "$package_dir" ]]; then
                     python_versions=$(get_python_versions_for_package $package_dir)
                     if [[ $python_versions =~ $PYTHON3_VERSION ]]; then
+                        echo "Using $PYTHON3_VERSION version to install $package_dir"
+                        sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
+                        cmd_pip=$(get_pip_command $PYTHON3_VERSION)
+                    else
+                        # The package may not have yet advertised python3.5
+                        # support so check for just python3 classifier and log
+                        # a warning.
+                        python3_classifier=$(check_python3_support_for_package_local $package_dir)
+                        if [[ ! -z "$python3_classifier" ]]; then
+                            echo "Using $PYTHON3_VERSION version to install $package_dir"
+                            sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
+                            cmd_pip=$(get_pip_command $PYTHON3_VERSION)
+                        fi
+                    fi
+                else
+                    # Check pypi as we don't have the package on disk
+                    package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*')
+                    python3_classifier=$(check_python3_support_for_package_remote $package)
+                    if [[ ! -z "$python3_classifier" ]]; then
+                        echo "Using $PYTHON3_VERSION version to install $package"
+                        sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
                         cmd_pip=$(get_pip_command $PYTHON3_VERSION)
                     fi
                 fi
diff --git a/lib/apache b/lib/apache
index 740f588..d1a11ae 100644
--- a/lib/apache
+++ b/lib/apache
@@ -29,16 +29,22 @@
 
 
 # Set up apache name and configuration directory
+# Note that APACHE_CONF_DIR is more accurately apache's vhost configuration
+# dir, but we can't just change the name because it is a public interface.
 if is_ubuntu; then
     APACHE_NAME=apache2
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/sites-available}
+    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf-enabled}
 elif is_fedora; then
     APACHE_NAME=httpd
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d}
+    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d}
 elif is_suse; then
     APACHE_NAME=apache2
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d}
+    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d}
 fi
+APACHE_LOG_DIR="/var/log/${APACHE_NAME}"
 
 # Functions
 # ---------
@@ -65,7 +71,15 @@
     # Apache installation, because we mark it NOPRIME
     if is_ubuntu; then
         # Install apache2, which is NOPRIME'd
-        install_package apache2 libapache2-mod-wsgi
+        install_package apache2
+        if python3_enabled; then
+            if is_package_installed libapache2-mod-wsgi; then
+                uninstall_package libapache2-mod-wsgi
+            fi
+            install_package libapache2-mod-wsgi-py3
+        else
+            install_package libapache2-mod-wsgi
+        fi
     elif is_fedora; then
         sudo rm -f /etc/httpd/conf.d/000-*
         install_package httpd mod_wsgi
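On Ubuntu the branch above swaps ``libapache2-mod-wsgi`` for ``libapache2-mod-wsgi-py3`` when ``python3_enabled`` is true; a quick sanity check after stacking (sketch) is to see which package ended up installed:

    dpkg -l 'libapache2-mod-wsgi*'   # shows whether the py2 or py3 module is present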
diff --git a/lib/cinder b/lib/cinder
index 0fe950b..f6ad780 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -70,6 +70,9 @@
 # What type of LVM device should Cinder use for LVM backend
 # Defaults to default, which is thick, the other valid choice
 # is thin, which as the name implies utilizes lvm thin provisioning.
+# Thinly provisioned LVM volumes may be more efficient when using the Cinder
+# image cache, but there are also known race failures with volume snapshots
+# and thinly provisioned LVM volumes; see bug 1642111 for details.
 CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default}
 
 # Default backends
@@ -128,6 +131,17 @@
 CINDER_NOVA_CATALOG_INFO=${CINDER_NOVA_CATALOG_INFO:-compute:nova:publicURL}
 CINDER_NOVA_CATALOG_ADMIN_INFO=${CINDER_NOVA_CATALOG_ADMIN_INFO:-compute:nova:adminURL}
 
+# Environment variables to configure the image-volume cache
+CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
+
+# For limits, if left unset, it will use cinder defaults of 0 for unlimited
+CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-}
+CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-}
+
+# Configure which cinder backends will have the image-volume cache. This takes the same
+# form as the CINDER_ENABLED_BACKENDS config option. By default it will
+# enable the cache for all cinder backends.
+CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
 
 # Functions
 # ---------
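A ``local.conf`` fragment exercising the new knobs might look like this (values illustrative; by default the cache is enabled for every backend in ``CINDER_ENABLED_BACKENDS``):

    [[local|localrc]]
    CINDER_IMG_CACHE_ENABLED=True
    CINDER_IMG_CACHE_SIZE_GB=5
    CINDER_IMG_CACHE_SIZE_COUNT=10
    # restrict the cache to a single backend instead of all enabled ones
    CINDER_CACHE_ENABLED_FOR_BACKENDS=lvm:lvmdriver-1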
@@ -292,6 +306,7 @@
         if [[ -n "$default_name" ]]; then
             iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
         fi
+        configure_cinder_image_volume_cache
     fi
 
     if is_service_enabled swift; then
@@ -397,6 +412,8 @@
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s" \
             "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
+
+        configure_cinder_internal_tenant
     fi
 }
 
@@ -408,11 +425,7 @@
 }
 
 # init_cinder() - Initialize database and volume group
-# Uses global ``NOVA_ENABLED_APIS``
 function init_cinder {
-    # Force nova volumes off
-    NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/osapi_volume,//")
-
     if is_service_enabled $DATABASE_BACKENDS; then
         # (Re)create cinder database
         recreate_database cinder
@@ -574,6 +587,31 @@
     :
 }
 
+function configure_cinder_internal_tenant {
+    # Re-use the Cinder service account for simplicity.
+    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_project_id $(get_or_create_project $SERVICE_PROJECT_NAME)
+    iniset $CINDER_CONF DEFAULT cinder_internal_tenant_user_id $(get_or_create_user "cinder")
+}
+
+function configure_cinder_image_volume_cache {
+    # Expect CINDER_CACHE_ENABLED_FOR_BACKENDS to be a list of backends
+    # similar to CINDER_ENABLED_BACKENDS with NAME:TYPE where NAME will
+    # be the backend specific configuration stanza in cinder.conf.
+    for be in ${CINDER_CACHE_ENABLED_FOR_BACKENDS//,/ }; do
+        local be_name=${be##*:}
+
+        iniset $CINDER_CONF $be_name image_volume_cache_enabled $CINDER_IMG_CACHE_ENABLED
+
+        if [[ -n $CINDER_IMG_CACHE_SIZE_GB ]]; then
+            iniset $CINDER_CONF $be_name image_volume_cache_max_size_gb $CINDER_IMG_CACHE_SIZE_GB
+        fi
+
+        if [[ -n $CINDER_IMG_CACHE_SIZE_COUNT ]]; then
+            iniset $CINDER_CONF $be_name image_volume_cache_max_count $CINDER_IMG_CACHE_SIZE_COUNT
+        fi
+    done
+}
+
 
 # Restore xtrace
 $_XTRACE_CINDER
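With settings like those, ``configure_cinder_image_volume_cache`` above would write roughly the following into the ``lvmdriver-1`` stanza of ``cinder.conf`` (sketch; the size and count keys are only set when the corresponding variables are non-empty):

    [lvmdriver-1]
    image_volume_cache_enabled = True
    image_volume_cache_max_size_gb = 5
    image_volume_cache_max_count = 10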
diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph
index 9bff5be..00a0bb3 100644
--- a/lib/cinder_backends/ceph
+++ b/lib/cinder_backends/ceph
@@ -45,10 +45,10 @@
 
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.rbd.RBDDriver"
-    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF"
+    iniset $CINDER_CONF $be_name rbd_ceph_conf "$CEPH_CONF_FILE"
     iniset $CINDER_CONF $be_name rbd_pool "$CINDER_CEPH_POOL"
     iniset $CINDER_CONF $be_name rbd_user "$CINDER_CEPH_USER"
-    iniset $CINDER_CONF $be_name rbd_uuid "$CINDER_CEPH_UUID"
+    iniset $CINDER_CONF $be_name rbd_secret_uuid "$CINDER_CEPH_UUID"
     iniset $CINDER_CONF $be_name rbd_flatten_volume_from_snapshot False
     iniset $CINDER_CONF $be_name rbd_max_clone_depth 5
     iniset $CINDER_CONF DEFAULT glance_api_version 2
@@ -66,7 +66,7 @@
         sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
         iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph"
-        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF"
+        iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
         iniset $CINDER_CONF DEFAULT backup_ceph_pool "$CINDER_BAK_CEPH_POOL"
         iniset $CINDER_CONF DEFAULT backup_ceph_user "$CINDER_BAK_CEPH_USER"
         iniset $CINDER_CONF DEFAULT backup_ceph_stripe_unit 0
diff --git a/lib/databases/mysql b/lib/databases/mysql
index f6cc922..89ae082 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -82,10 +82,9 @@
     fi
 
     # Set the root password - only works the first time. For Ubuntu, we already
-    # did that with debconf before installing the package.
-    if ! is_ubuntu; then
-        sudo mysqladmin -u root password $DATABASE_PASSWORD || true
-    fi
+    # did that with debconf before installing the package, but we still try,
+    # because the package might have been installed already.
+    sudo mysqladmin -u root password $DATABASE_PASSWORD || true
 
     # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
     sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 14425a5..1f347f5 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -47,7 +47,7 @@
 }
 
 function configure_database_postgresql {
-    local pg_conf pg_dir pg_hba root_roles version
+    local pg_conf pg_dir pg_hba check_role version
     echo_summary "Configuring and starting PostgreSQL"
     if is_fedora; then
         pg_hba=/var/lib/pgsql/data/pg_hba.conf
@@ -85,8 +85,8 @@
     restart_service postgresql
 
     # Create the role if it's not here or else alter it.
-    root_roles=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='root'")
-    if [[ ${root_roles} == *HERE ]];then
+    check_role=$(sudo -u root sudo -u postgres -i psql -t -c "SELECT 'HERE' from pg_roles where rolname='$DATABASE_USER'")
+    if [[ ${check_role} == *HERE ]];then
         sudo -u root sudo -u postgres -i psql -c "ALTER ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
     else
         sudo -u root sudo -u postgres -i psql -c "CREATE ROLE $DATABASE_USER WITH SUPERUSER LOGIN PASSWORD '$DATABASE_PASSWORD'"
diff --git a/lib/glance b/lib/glance
index a31e564..da9cd43 100644
--- a/lib/glance
+++ b/lib/glance
@@ -187,8 +187,6 @@
 
         iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD
         iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_name $SERVICE_DOMAIN_NAME
-        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_name $SERVICE_DOMAIN_NAME
         iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3
 
         # commenting is not strictly necessary but it's confusing to have bad values in conf
@@ -237,7 +235,7 @@
     iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
     iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_url
-    iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v2.0
+    iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_AUTH_URI/v3
     iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_tenant_name
     iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
     iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_user
@@ -312,6 +310,11 @@
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \
             "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT"
+
+        # Note(frickler): Crude workaround for https://bugs.launchpad.net/glance-store/+bug/1620999
+        service_domain_id=$(get_or_create_domain $SERVICE_DOMAIN_NAME)
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id $service_domain_id
+        iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id $service_domain_id
     fi
 
     # Add glance-glare service and endpoints
diff --git a/lib/heat b/lib/heat
deleted file mode 100644
index c841e0a..0000000
--- a/lib/heat
+++ /dev/null
@@ -1,469 +0,0 @@
-#!/bin/bash
-#
-# lib/heat
-# Install and start **Heat** service
-
-# To enable, add the following to localrc
-#
-#   ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng
-
-# Dependencies:
-# (none)
-
-# stack.sh
-# ---------
-# - install_heatclient
-# - install_heat
-# - configure_heatclient
-# - configure_heat
-# - _config_heat_apache_wsgi
-# - init_heat
-# - start_heat
-# - stop_heat
-# - cleanup_heat
-
-# Save trace setting
-_XTRACE_HEAT=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Defaults
-# --------
-
-# set up default directories
-GITDIR["python-heatclient"]=$DEST/python-heatclient
-
-# Toggle for deploying Heat-API under HTTPD + mod_wsgi
-HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False}
-
-HEAT_DIR=$DEST/heat
-HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools
-HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates
-OCC_DIR=$DEST/os-collect-config
-DIB_UTILS_DIR=$DEST/dib-utils
-ORC_DIR=$DEST/os-refresh-config
-OAC_DIR=$DEST/os-apply-config
-
-HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo
-HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899}
-
-HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat}
-HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE)
-HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON)
-HEAT_CONF_DIR=/etc/heat
-HEAT_CONF=$HEAT_CONF_DIR/heat.conf
-HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d
-HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates
-HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP}
-HEAT_API_PORT=${HEAT_API_PORT:-8004}
-HEAT_SERVICE_USER=${HEAT_SERVICE_USER:-heat}
-HEAT_TRUSTEE_USER=${HEAT_TRUSTEE_USER:-$HEAT_SERVICE_USER}
-HEAT_TRUSTEE_PASSWORD=${HEAT_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD}
-HEAT_TRUSTEE_DOMAIN=${HEAT_TRUSTEE_DOMAIN:-default}
-
-# Support entry points installation of console scripts
-HEAT_BIN_DIR=$(get_python_exec_prefix)
-
-# other default options
-if [[ "$HEAT_STANDALONE" = "True" ]]; then
-    # for standalone, use defaults which require no service user
-    HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN)
-    HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password}
-    if [[ ${HEAT_DEFERRED_AUTH} != "password" ]]; then
-        # Heat does not support keystone trusts when deployed in
-        # standalone mode
-        die $LINENO \
-            'HEAT_DEFERRED_AUTH can only be set to "password" when HEAT_STANDALONE is True.'
-    fi
-else
-    HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN)
-    HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-}
-fi
-HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins}
-ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-}
-
-# Functions
-# ---------
-
-# Test if any Heat services are enabled
-# is_heat_enabled
-function is_heat_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"h-" ]] && return 0
-    return 1
-}
-
-# cleanup_heat() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_heat {
-    sudo rm -rf $HEAT_AUTH_CACHE_DIR
-    sudo rm -rf $HEAT_ENV_DIR
-    sudo rm -rf $HEAT_TEMPLATES_DIR
-    sudo rm -rf $HEAT_CONF_DIR
-}
-
-# configure_heat() - Set config files, create data dirs, etc
-function configure_heat {
-
-    sudo install -d -o $STACK_USER $HEAT_CONF_DIR
-    # remove old config files
-    rm -f $HEAT_CONF_DIR/heat-*.conf
-
-    HEAT_API_CFN_HOST=${HEAT_API_CFN_HOST:-$HOST_IP}
-    HEAT_API_CFN_PORT=${HEAT_API_CFN_PORT:-8000}
-    HEAT_ENGINE_HOST=${HEAT_ENGINE_HOST:-$SERVICE_HOST}
-    HEAT_ENGINE_PORT=${HEAT_ENGINE_PORT:-8001}
-    HEAT_API_CW_HOST=${HEAT_API_CW_HOST:-$HOST_IP}
-    HEAT_API_CW_PORT=${HEAT_API_CW_PORT:-8003}
-    HEAT_API_PASTE_FILE=$HEAT_CONF_DIR/api-paste.ini
-    HEAT_POLICY_FILE=$HEAT_CONF_DIR/policy.json
-
-    cp $HEAT_DIR/etc/heat/api-paste.ini $HEAT_API_PASTE_FILE
-    cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE
-
-    # common options
-    iniset_rpc_backend heat $HEAT_CONF
-    iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT
-    iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition
-    iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT
-    iniset $HEAT_CONF database connection `database_connection_url heat`
-    iniset $HEAT_CONF DEFAULT auth_encryption_key $(generate_hex_string 16)
-
-    iniset $HEAT_CONF DEFAULT region_name_for_services "$REGION_NAME"
-
-    # logging
-    iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG
-    if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ]  ; then
-        # Add color to logging output
-        setup_colorized_logging $HEAT_CONF DEFAULT tenant user
-    fi
-
-    if [ ! -z "$HEAT_DEFERRED_AUTH" ]; then
-        iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH
-    fi
-
-    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
-        _config_heat_apache_wsgi
-    fi
-
-    if [[ "$HEAT_STANDALONE" = "True" ]]; then
-        iniset $HEAT_CONF paste_deploy flavor standalone
-        iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s"
-    else
-        configure_auth_token_middleware $HEAT_CONF heat $HEAT_AUTH_CACHE_DIR
-    fi
-
-    # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure
-    # the section for the client plugin associated with the trustee
-    if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then
-        iniset $HEAT_CONF trustee auth_type password
-        iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI
-        iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER
-        iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD
-        iniset $HEAT_CONF trustee user_domain_id $HEAT_TRUSTEE_DOMAIN
-    fi
-
-    # clients_keystone
-    iniset $HEAT_CONF clients_keystone auth_uri $KEYSTONE_AUTH_URI
-
-    # OpenStack API
-    iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT
-    iniset $HEAT_CONF heat_api workers "$API_WORKERS"
-
-    # Cloudformation API
-    iniset $HEAT_CONF heat_api_cfn bind_port $HEAT_API_CFN_PORT
-
-    # Cloudwatch API
-    iniset $HEAT_CONF heat_api_cloudwatch bind_port $HEAT_API_CW_PORT
-
-    if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
-        iniset $HEAT_CONF clients_keystone ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then
-        iniset $HEAT_CONF clients_nova ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then
-        iniset $HEAT_CONF clients_cinder ca_file $SSL_BUNDLE_FILE
-    fi
-
-    if [[ "$HEAT_ENABLE_ADOPT_ABANDON" = "True" ]]; then
-        iniset $HEAT_CONF DEFAULT enable_stack_adopt true
-        iniset $HEAT_CONF DEFAULT enable_stack_abandon true
-    fi
-
-    iniset $HEAT_CONF cache enabled "True"
-    iniset $HEAT_CONF cache backend "dogpile.cache.memory"
-
-    sudo install -d -o $STACK_USER $HEAT_ENV_DIR $HEAT_TEMPLATES_DIR
-
-    # copy the default environment
-    cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/
-
-    # copy the default templates
-    cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/
-
-    # Enable heat plugins.
-    # NOTE(nic): The symlink nonsense is necessary because when
-    # plugins are installed in "developer mode", the final component
-    # of their target directory is always "resources", which confuses
-    # Heat's plugin loader into believing that all plugins are named
-    # "resources", and therefore are all the same plugin; so it
-    # will only load one of them.  Linking them all to a common
-    # location with unique names avoids that type of collision,
-    # while still allowing the plugins to be edited in-tree.
-    local err_count=0
-
-    if [ -n "$ENABLE_HEAT_PLUGINS" ]; then
-        mkdir -p $HEAT_PLUGIN_DIR
-        # Clean up cruft from any previous runs
-        rm -f $HEAT_PLUGIN_DIR/*
-        iniset $HEAT_CONF DEFAULT plugin_dirs $HEAT_PLUGIN_DIR
-    fi
-
-    for heat_plugin in $ENABLE_HEAT_PLUGINS; do
-        if [ -d $HEAT_DIR/contrib/$heat_plugin ]; then
-            setup_package $HEAT_DIR/contrib/$heat_plugin -e
-            ln -s $HEAT_DIR/contrib/$heat_plugin/$heat_plugin/resources $HEAT_PLUGIN_DIR/$heat_plugin
-        else
-            : # clear retval on the test so that we can roll up errors
-            err $LINENO "Requested Heat plugin(${heat_plugin}) not found."
-            err_count=$(($err_count + 1))
-        fi
-    done
-    [ $err_count -eq 0 ] || die $LINENO "$err_count of the requested Heat plugins could not be installed."
-}
-
-# init_heat() - Initialize database
-function init_heat {
-
-    # (re)create heat database
-    recreate_database heat
-
-    $HEAT_BIN_DIR/heat-manage --config-file $HEAT_CONF db_sync
-    create_heat_cache_dir
-}
-
-# create_heat_cache_dir() - Part of the init_heat() process
-function create_heat_cache_dir {
-    # Create cache dirs
-    sudo install -d -o $STACK_USER $HEAT_AUTH_CACHE_DIR
-}
-
-# install_heatclient() - Collect source and prepare
-function install_heatclient {
-    if use_library_from_git "python-heatclient"; then
-        git_clone_by_name "python-heatclient"
-        setup_dev_lib "python-heatclient"
-        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-heatclient"]}/tools/,/etc/bash_completion.d/}heat.bash_completion
-    fi
-}
-
-# install_heat() - Collect source and prepare
-function install_heat {
-    git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH
-    setup_develop $HEAT_DIR
-    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
-        install_apache_wsgi
-    fi
-}
-
-# install_heat_other() - Collect source and prepare
-function install_heat_other {
-    git_clone $HEAT_CFNTOOLS_REPO $HEAT_CFNTOOLS_DIR $HEAT_CFNTOOLS_BRANCH
-    git_clone $HEAT_TEMPLATES_REPO $HEAT_TEMPLATES_REPO_DIR $HEAT_TEMPLATES_BRANCH
-    git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH
-    git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH
-    git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH
-    git_clone $DIB_UTILS_REPO $DIB_UTILS_DIR $DIB_UTILS_BRANCH
-}
-
-# start_heat() - Start running processes, including screen
-function start_heat {
-    run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF"
-
-    # If the site is not enabled then we are in a grenade scenario
-    local enabled_site_file
-    enabled_site_file=$(apache_site_config_for heat-api)
-    if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
-        enable_apache_site heat-api
-        enable_apache_site heat-api-cfn
-        enable_apache_site heat-api-cloudwatch
-        restart_apache_server
-        tail_log heat-api /var/log/$APACHE_NAME/heat-api.log
-        tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log
-        tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log
-    else
-        run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF"
-        run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF"
-        run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF"
-    fi
-}
-
-# stop_heat() - Stop running processes
-function stop_heat {
-    # Kill the screen windows
-    stop_process h-eng
-
-    if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then
-        disable_apache_site heat-api
-        disable_apache_site heat-api-cfn
-        disable_apache_site heat-api-cloudwatch
-        restart_apache_server
-    else
-        local serv
-        for serv in h-api h-api-cfn h-api-cw; do
-            stop_process $serv
-        done
-    fi
-
-}
-
-# _cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_heat_apache_wsgi {
-    sudo rm -f $(apache_site_config_for heat-api)
-    sudo rm -f $(apache_site_config_for heat-api-cfn)
-    sudo rm -f $(apache_site_config_for heat-api-cloudwatch)
-}
-
-# _config_heat_apache_wsgi() - Set WSGI config files of Heat
-function _config_heat_apache_wsgi {
-
-    local heat_apache_conf
-    heat_apache_conf=$(apache_site_config_for heat-api)
-    local heat_cfn_apache_conf
-    heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn)
-    local heat_cloudwatch_apache_conf
-    heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch)
-    local heat_ssl=""
-    local heat_certfile=""
-    local heat_keyfile=""
-    local heat_api_port=$HEAT_API_PORT
-    local heat_cfn_api_port=$HEAT_API_CFN_PORT
-    local heat_cw_api_port=$HEAT_API_CW_PORT
-    local venv_path=""
-
-    sudo cp $FILES/apache-heat-api.template $heat_apache_conf
-    sudo sed -e "
-        s|%PUBLICPORT%|$heat_api_port|g;
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
-        s|%SSLENGINE%|$heat_ssl|g;
-        s|%SSLCERTFILE%|$heat_certfile|g;
-        s|%SSLKEYFILE%|$heat_keyfile|g;
-        s|%USER%|$STACK_USER|g;
-        s|%VIRTUALENV%|$venv_path|g
-    " -i $heat_apache_conf
-
-    sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf
-    sudo sed -e "
-        s|%PUBLICPORT%|$heat_cfn_api_port|g;
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
-        s|%SSLENGINE%|$heat_ssl|g;
-        s|%SSLCERTFILE%|$heat_certfile|g;
-        s|%SSLKEYFILE%|$heat_keyfile|g;
-        s|%USER%|$STACK_USER|g;
-        s|%VIRTUALENV%|$venv_path|g
-    " -i $heat_cfn_apache_conf
-
-    sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf
-    sudo sed -e "
-        s|%PUBLICPORT%|$heat_cw_api_port|g;
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g;
-        s|%SSLENGINE%|$heat_ssl|g;
-        s|%SSLCERTFILE%|$heat_certfile|g;
-        s|%SSLKEYFILE%|$heat_keyfile|g;
-        s|%USER%|$STACK_USER|g;
-        s|%VIRTUALENV%|$venv_path|g
-    " -i $heat_cloudwatch_apache_conf
-}
-
-
-# create_heat_accounts() - Set up common required heat accounts
-function create_heat_accounts {
-    if [[ "$HEAT_STANDALONE" != "True" ]]; then
-
-        create_service_user "heat" "admin"
-        get_or_create_service "heat" "orchestration" "Heat Orchestration Service"
-        get_or_create_endpoint \
-            "orchestration" \
-            "$REGION_NAME" \
-            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" \
-            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s" \
-            "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(project_id)s"
-
-        get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service"
-        get_or_create_endpoint \
-            "cloudformation"  \
-            "$REGION_NAME" \
-            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
-            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \
-            "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1"
-
-        # heat_stack_user role is for users created by Heat
-        get_or_create_role "heat_stack_user"
-    fi
-
-    if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then
-        # domain -> heat and user -> heat_domain_admin
-        domain_id=$(get_or_create_domain heat 'Owns users and projects created by heat')
-        iniset $HEAT_CONF DEFAULT stack_user_domain_id ${domain_id}
-        get_or_create_user heat_domain_admin $SERVICE_PASSWORD heat
-        get_or_add_user_domain_role admin heat_domain_admin heat
-        iniset $HEAT_CONF DEFAULT stack_domain_admin heat_domain_admin
-        iniset $HEAT_CONF DEFAULT stack_domain_admin_password $SERVICE_PASSWORD
-    fi
-}
-
-# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects
-function build_heat_pip_mirror {
-    local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR $DIB_UTILS_DIR"
-    local projpath proj package
-
-    rm -rf $HEAT_PIP_REPO
-    mkdir -p $HEAT_PIP_REPO
-
-    echo "<html><body>" > $HEAT_PIP_REPO/index.html
-    for projpath in $project_dirs; do
-        proj=$(basename $projpath)
-        mkdir -p $HEAT_PIP_REPO/$proj
-        pushd $projpath
-        rm -rf dist
-        python setup.py sdist
-        pushd dist
-        package=$(ls *)
-        mv $package $HEAT_PIP_REPO/$proj/$package
-        popd
-
-        echo "<html><body><a href=\"$package\">$package</a></body></html>" > $HEAT_PIP_REPO/$proj/index.html
-        echo "<a href=\"$proj\">$proj</a><br/>" >> $HEAT_PIP_REPO/index.html
-
-        popd
-    done
-
-    echo "</body></html>" >> $HEAT_PIP_REPO/index.html
-
-    local heat_pip_repo_apache_conf
-    heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo)
-
-    sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf
-    sudo sed -e "
-        s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g;
-        s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g;
-        s|%APACHE_NAME%|$APACHE_NAME|g;
-    " -i $heat_pip_repo_apache_conf
-    enable_apache_site heat_pip_repo
-    restart_apache_server
-    sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $HEAT_PIP_REPO_PORT -j ACCEPT || true
-}
-
-# Restore xtrace
-$_XTRACE_HEAT
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
diff --git a/lib/horizon b/lib/horizon
index 78cbe8b..4cabbe4 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -81,7 +81,11 @@
     # Horizon is installed as develop mode, so we can compile here.
     # Message catalog compilation is handled by Django admin script,
     # so compiling them after the installation avoids Django installation twice.
-    (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages)
+    if python3_enabled; then
+        (cd $HORIZON_DIR; python${PYTHON3_VERSION} manage.py compilemessages)
+    else
+        (cd $HORIZON_DIR; python manage.py compilemessages)
+    fi
 
     # ``local_settings.py`` is used to override horizon default settings.
     local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
@@ -97,6 +101,11 @@
     _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
 
+    # note(trebskit): if HOST_IP points at a non-localhost IP address, horizon cannot be
+    # accessed from outside the virtual machine. This fix is meant primarily for local
+    # development purposes.
+    _horizon_config_set $local_settings "" ALLOWED_HOSTS [\"*\"]
+
     if [ -f $SSL_BUNDLE_FILE ]; then
         _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\"
     fi
@@ -121,9 +130,7 @@
     if is_ubuntu; then
         disable_apache_site 000-default
         sudo touch $horizon_conf
-    elif is_fedora; then
-        sudo sed '/^Listen/s/^.*$/Listen 0.0.0.0:80/' -i /etc/httpd/conf/httpd.conf
-    elif is_suse; then
+    elif is_fedora || is_suse; then
         : # nothing to do
     else
         exit_distro_not_supported "horizon apache configuration"
@@ -159,7 +166,11 @@
         git_clone_by_name "django_openstack_auth"
         # Compile message catalogs before installation
         _prepare_message_catalog_compilation
-        (cd $dir; python setup.py compile_catalog)
+        if python3_enabled; then
+            (cd $dir; python${PYTHON3_VERSION} setup.py compile_catalog)
+        else
+            (cd $dir; python setup.py compile_catalog)
+        fi
         setup_dev_lib "django_openstack_auth"
     fi
     # if we aren't using this library from git, then we just let it
diff --git a/lib/keystone b/lib/keystone
index 13fa50b..825fe44 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -25,7 +25,6 @@
 # - create_keystone_accounts
 # - stop_keystone
 # - cleanup_keystone
-# - _cleanup_keystone_apache_wsgi
 
 # Save trace setting
 _XTRACE_KEYSTONE=$(set +o | grep xtrace)
@@ -52,9 +51,6 @@
 KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf
 KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini}
 
-# NOTE(sdague): remove in Newton
-KEYSTONE_CATALOG_BACKEND="sql"
-
 # Toggle for deploying Keystone under HTTPD + mod_wsgi
 # Deprecated in Mitaka, use KEYSTONE_DEPLOY instead.
 KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}}
@@ -89,7 +85,7 @@
 
 # Select Keystone's token provider (and format)
 # Choose from 'uuid', 'pki', 'pkiz', or 'fernet'
-KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-}
+KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-fernet}
 KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]')
 
 # Set Keystone interface configuration
@@ -149,11 +145,7 @@
 # cleanup_keystone() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_keystone {
-    _cleanup_keystone_apache_wsgi
-}
-
-# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file
-function _cleanup_keystone_apache_wsgi {
+    disable_apache_site keystone
     sudo rm -f $(apache_site_config_for keystone)
 }
 
@@ -161,6 +153,7 @@
 function _config_keystone_apache_wsgi {
     local keystone_apache_conf
     keystone_apache_conf=$(apache_site_config_for keystone)
+    keystone_ssl_listen="#"
     local keystone_ssl=""
     local keystone_certfile=""
     local keystone_keyfile=""
@@ -169,6 +162,7 @@
     local venv_path=""
 
     if is_ssl_enabled_service key; then
+        keystone_ssl_listen=""
         keystone_ssl="SSLEngine On"
         keystone_certfile="SSLCertificateFile $KEYSTONE_SSL_CERT"
         keystone_keyfile="SSLCertificateKeyFile $KEYSTONE_SSL_KEY"
@@ -186,6 +180,7 @@
         s|%PUBLICPORT%|$keystone_service_port|g;
         s|%ADMINPORT%|$keystone_auth_port|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
+        s|%SSLLISTEN%|$keystone_ssl_listen|g;
         s|%SSLENGINE%|$keystone_ssl|g;
         s|%SSLCERTFILE%|$keystone_certfile|g;
         s|%SSLKEYFILE%|$keystone_keyfile|g;
@@ -226,13 +221,6 @@
         iniset $KEYSTONE_CONF ldap password $LDAP_PASSWORD
         iniset $KEYSTONE_CONF ldap user $LDAP_MANAGER_DN
         iniset $KEYSTONE_CONF ldap suffix $LDAP_BASE_DN
-        iniset $KEYSTONE_CONF ldap use_dumb_member "True"
-        iniset $KEYSTONE_CONF ldap user_attribute_ignore "enabled,email,tenants,default_project_id"
-        iniset $KEYSTONE_CONF ldap tenant_attribute_ignore "enabled"
-        iniset $KEYSTONE_CONF ldap tenant_domain_id_attribute "businessCategory"
-        iniset $KEYSTONE_CONF ldap tenant_desc_attribute "description"
-        iniset $KEYSTONE_CONF ldap tenant_tree_dn "ou=Projects,$LDAP_BASE_DN"
-        iniset $KEYSTONE_CONF ldap user_domain_id_attribute "businessCategory"
         iniset $KEYSTONE_CONF ldap user_tree_dn "ou=Users,$LDAP_BASE_DN"
         iniset $KEYSTONE_CONF DEFAULT member_role_id "9fe2ff9ee4384b1894a90878d3e92bab"
         iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_"
@@ -349,8 +337,8 @@
     # Configure the project created by the 'keystone-manage bootstrap' as the cloud-admin project.
     # The users from this project are globally admin as before, but it also
     # allows policy changes in order to clarify the adminess scope.
-    iniset $KEYSTONE_CONF resource admin_project_domain_name Default
-    iniset $KEYSTONE_CONF resource admin_project_name admin
+    #iniset $KEYSTONE_CONF resource admin_project_domain_name Default
+    #iniset $KEYSTONE_CONF resource admin_project_name admin
 }
 
 # create_keystone_accounts() - Sets up common required keystone accounts
@@ -457,14 +445,16 @@
 #
 # create_service_user <name> [role]
 #
-# The role defaults to the service role. It is allowed to be provided as optional as historically
+# We always add the service role; other roles may also be added, as historically
 # a lot of projects have configured themselves with the admin or other role here if they are
 # using this user for other purposes beyond simply auth_token middleware.
 function create_service_user {
-    local role=${2:-service}
-
     get_or_create_user "$1" "$SERVICE_PASSWORD" "$SERVICE_DOMAIN_NAME"
-    get_or_add_user_project_role "$role" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME"
+    get_or_add_user_project_role service "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME"
+
+    if [[ -n "$2" ]]; then
+        get_or_add_user_project_role "$2" "$1" "$SERVICE_PROJECT_NAME" "$SERVICE_DOMAIN_NAME" "$SERVICE_DOMAIN_NAME"
+    fi
 }
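A hedged usage sketch for the reworked create_service_user above: both calls create the user in $SERVICE_PROJECT_NAME and grant the service role, and the second call additionally grants admin; the service names are illustrative:

    create_service_user "glance"
    create_service_user "heat" "admin"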
 
 # Configure the service to use the auth token middleware.
@@ -500,8 +490,10 @@
         init_ldap
     fi
 
-    # (Re)create keystone database
-    recreate_database keystone
+    if [[ "$RECREATE_KEYSTONE_DB" == True ]]; then
+        # (Re)create keystone database
+        recreate_database keystone
+    fi
 
     # Initialize keystone database
     $KEYSTONE_BIN_DIR/keystone-manage --config-file $KEYSTONE_CONF db_sync
diff --git a/lib/lvm b/lib/lvm
index d35a76f..0cebd92 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -23,11 +23,7 @@
 # Defaults
 # --------
 # Name of the lvm volume groups to use/create for iscsi volumes
-# This monkey-motion is for compatibility with icehouse-generation Grenade
-# If ``VOLUME_GROUP`` is set, use it, otherwise we'll build a VG name based
-# on ``VOLUME_GROUP_NAME`` that includes the backend name
-# Grenade doesn't use ``VOLUME_GROUP2`` so it is left out
-VOLUME_GROUP_NAME=${VOLUME_GROUP:-${VOLUME_GROUP_NAME:-stack-volumes}}
+VOLUME_GROUP_NAME=${VOLUME_GROUP_NAME:-stack-volumes}
 DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default
 
 # Backing file name is of the form $VOLUME_GROUP$BACKING_FILE_SUFFIX
@@ -105,7 +101,7 @@
 # init_lvm_volume_group() initializes the volume group creating the backing
 # file if necessary
 #
-# Usage: init_lvm_volume_group() $vg
+# Usage: init_lvm_volume_group() $vg $size
 function init_lvm_volume_group {
     local vg=$1
     local size=$2
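A hedged usage sketch for the extended signature documented above; the group name follows the DEFAULT_VOLUME_GROUP_NAME convention from this file and the size is illustrative:

    init_lvm_volume_group "stack-volumes-default" 10G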
diff --git a/lib/neutron b/lib/neutron
index ab84f7e..852787d 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -47,10 +47,10 @@
 NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # By default, use the ML2 plugin
-NEUTRON_PLUGIN=${NEUTRON_PLUGIN:-ml2}
-NEUTRON_PLUGIN_CONF_FILENAME=${NEUTRON_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
-NEUTRON_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_PLUGIN
-NEUTRON_PLUGIN_CONF=$NEUTRON_PLUGIN_CONF_PATH/$NEUTRON_PLUGIN_CONF_FILENAME
+NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
+NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
+NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN
+NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME
 
 NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent}
 NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent}
@@ -90,6 +90,10 @@
     return 1
 }
 
+if is_neutron_legacy_enabled; then
+    source $TOP_DIR/lib/neutron-legacy
+fi
+
 # cleanup_neutron() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_neutron_new {
@@ -117,9 +121,9 @@
 
     configure_neutron_rootwrap
 
-    mkdir -p $NEUTRON_PLUGIN_CONF_PATH
+    mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH
 
-    cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_PLUGIN/$NEUTRON_PLUGIN_CONF_FILENAME.sample $NEUTRON_PLUGIN_CONF
+    cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF
 
     iniset $NEUTRON_CONF database connection `database_connection_url neutron`
     iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH
@@ -139,7 +143,7 @@
 
         cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
 
-        iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_PLUGIN
+        iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
 
         iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
         iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
@@ -147,10 +151,6 @@
         iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
         configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken
 
-        # Configuration for neutron notifations to nova.
-        iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
-        iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-
         iniset $NEUTRON_CONF nova auth_type password
         iniset $NEUTRON_CONF nova auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
         iniset $NEUTRON_CONF nova username nova
@@ -162,26 +162,30 @@
 
         # Configure VXLAN
         # TODO(sc68cal) not hardcode?
-        iniset $NEUTRON_PLUGIN_CONF ml2 tenant_network_types vxlan
-        iniset $NEUTRON_PLUGIN_CONF ml2 type_drivers vxlan
-        iniset $NEUTRON_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
-        iniset $NEUTRON_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
-        iniset $NEUTRON_PLUGIN_CONF ml2 extension_drivers port_security
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 type_drivers vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers openvswitch,linuxbridge
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
+        if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers port_security
+        fi
     fi
 
     # Neutron OVS or LB agent
     if is_service_enabled neutron-agent; then
-        iniset $NEUTRON_PLUGIN_CONF agent tunnel_types vxlan
-        iniset $NEUTRON_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
+        iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
         # Configure the neutron agent
         if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_PLUGIN_CONF securitygroup iptables
-            iniset $NEUTRON_PLUGIN_CONF vxlan local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables
+            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
         else
-            iniset $NEUTRON_PLUGIN_CONF securitygroup iptables_hybrid
-            iniset $NEUTRON_PLUGIN_CONF ovs local_ip $HOST_IP
+            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup iptables_hybrid
+            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
         fi
+
+        enable_kernel_bridge_firewall
     fi
 
     # DHCP Agent
@@ -200,7 +204,7 @@
     if is_service_enabled neutron-l3; then
         cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
         iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        iniset $NEUTRON_CONF DEFAULT service_plugins router
+        neutron_service_plugin_class_add router
         iniset $NEUTRON_L3_CONF agent root_helper_daemon "$NEUTRON_ROOTWRAP_DAEMON_CMD"
         iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
         neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
@@ -249,14 +253,8 @@
         source $TOP_DIR/lib/neutron_plugins/services/metering
         neutron_agent_metering_configure_common
         neutron_agent_metering_configure_agent
-        # TODO(sc68cal) hack because we don't pass around
-        # $Q_SERVICE_PLUGIN_CLASSES like -legacy does
-        local plugins=""
-        plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
-        plugins+=",metering"
-        iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
+        neutron_service_plugin_class_add metering
     fi
-
 }
 
 # configure_neutron_rootwrap() - configure Neutron's rootwrap
@@ -397,7 +395,7 @@
 
     # Start the Neutron service
     # TODO(sc68cal) Stop hard coding this
-    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_PLUGIN_CONF"
+    run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF"
 
     if is_ssl_enabled_service "neutron"; then
         ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}"
@@ -433,15 +431,17 @@
     if is_service_enabled neutron-l3; then
         run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY $NEUTRON_CONFIG_ARG"
     fi
-    # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
-    # of the code in lib/neutron_plugins/services/l3
-    if type -p neutron_plugin_create_initial_networks > /dev/null; then
-        neutron_plugin_create_initial_networks
-    else
-        # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
-        source $TOP_DIR/lib/neutron_plugins/services/l3
-        # Create the networks using servic
-        create_neutron_initial_network
+    if is_service_enabled neutron-api; then
+        # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
+        # of the code in lib/neutron_plugins/services/l3
+        if type -p neutron_plugin_create_initial_networks > /dev/null; then
+            neutron_plugin_create_initial_networks
+        else
+            # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
+            source $TOP_DIR/lib/neutron_plugins/services/l3
+            # Create the networks using services/l3
+            create_neutron_initial_network
+        fi
     fi
     if is_service_enabled neutron-metadata-agent; then
         run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY $NEUTRON_CONFIG_ARG"
@@ -475,9 +475,9 @@
 
     NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CONF"
 
-    #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_PLUGIN_CONF (ml2_conf.ini) but others may not
+    #TODO(sc68cal) OVS and LB agent uses settings in NEUTRON_CORE_PLUGIN_CONF (ml2_conf.ini) but others may not
     if is_service_enabled neutron-agent; then
-        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_PLUGIN_CONF"
+        NEUTRON_CONFIG_ARG+=" --config-file $NEUTRON_CORE_PLUGIN_CONF"
     fi
 
     if is_service_enabled neutron-dhcp; then
@@ -494,6 +494,16 @@
 
 }
 
+# neutron_service_plugin_class_add() - add service plugin class
+function neutron_service_plugin_class_add_new {
+    local service_plugin_class=$1
+    local plugins=""
+
+    plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
+    plugins+=",${service_plugin_class}"
+    iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
+}
+
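A rough sketch of what neutron_service_plugin_class_add_new does to the service_plugins value, with the iniget/iniset plumbing elided and an assumed starting value of "router":

    plugins="router"
    service_plugin_class="metering"
    plugins+=",${service_plugin_class}"
    echo "$plugins"    # prints: router,metering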
 # Dispatch functions
 # These are needed for compatibility between the old and new implementations
 # where there are function name overlaps.  These will be removed when
@@ -553,6 +563,24 @@
     fi
 }
 
+function neutron_service_plugin_class_add {
+    if is_neutron_legacy_enabled; then
+        # Call back to old function
+        _neutron_service_plugin_class_add "$@"
+    else
+        neutron_service_plugin_class_add_new "$@"
+    fi
+}
+
+function install_neutron_agent_packages {
+    if is_neutron_legacy_enabled; then
+        # Call back to old function
+        install_neutron_agent_packages_mutnauq "$@"
+    else
+        :
+    fi
+}
+
 function start_neutron {
     if is_neutron_legacy_enabled; then
         # Call back to old function
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index 123ba42..37d2783 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -24,11 +24,9 @@
 # - check_neutron_third_party_integration
 # - start_neutron_agents
 # - create_neutron_initial_network
-# - setup_neutron_debug
 #
 # ``unstack.sh`` calls the entry points in this order:
 #
-# - teardown_neutron_debug
 # - stop_neutron
 # - stop_neutron_third_party
 # - cleanup_neutron
@@ -125,8 +123,6 @@
 Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST}
 # Allow Overlapping IP among subnets
 Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-# The name of the default q-l3 router
-Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
 Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
 Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
 VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
@@ -427,7 +423,7 @@
 }
 
 # install_neutron_agent_packages() - Collect source and prepare
-function install_neutron_agent_packages {
+function install_neutron_agent_packages_mutnauq {
     # radvd doesn't come with the OS. Install it if the l3 service is enabled.
     if is_service_enabled q-l3; then
         install_package radvd
@@ -513,6 +509,10 @@
 
 function stop_mutnauq_l2_agent {
     stop_process q-agt
+
+    if [ "$VIRT_DRIVER" = 'xenserver' ]; then
+        stop_process q-domua
+    fi
 }
 
 # stop_mutnauq_other() - Stop running processes (non-screen)
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index 7d59e13..0c8ccb8 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -62,13 +62,16 @@
         LB_INTERFACE_MAPPINGS=$PHYSICAL_NETWORK:$LB_PHYSICAL_INTERFACE
     fi
     if [[ "$PUBLIC_BRIDGE" != "" ]] && [[ "$PUBLIC_PHYSICAL_NETWORK" != "" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE"
+        if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
+            iniset /$Q_PLUGIN_CONF_FILE linux_bridge bridge_mappings "$PUBLIC_PHYSICAL_NETWORK:$PUBLIC_BRIDGE"
+        fi
     fi
     if [[ "$LB_INTERFACE_MAPPINGS" != "" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE linux_bridge physical_interface_mappings $LB_INTERFACE_MAPPINGS
     fi
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
+        enable_kernel_bridge_firewall
     else
         iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
     fi
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index 7e80209..e429714 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -35,7 +35,11 @@
 Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-vni_ranges=$TENANT_TUNNEL_RANGES}
 # List of extension drivers to load, use '-' instead of ':-' to allow people to
 # explicitly override this to blank
-Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security}
+else
+    Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS:-}
+fi
 
 # L3 Plugin to load for ML2
 # For some flat network environments, they do not want to extend the L3 plugin.
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index e27b8a6..f009966 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -81,19 +81,24 @@
         # integration bridge.  This is enabled by using a root wrapper
         # that executes commands on dom0 via a XenAPI plugin.
         # XenAPI does not support daemon rootwrap now, so set root_helper_daemon empty
-        iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND"
-        iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon ""
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_DOM0_COMMAND"
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper_daemon ""
 
         # Disable minimize polling, so that it can always detect OVS and Port changes
         # This is a problem of xenserver + neutron, bug has been reported
         # https://bugs.launchpad.net/neutron/+bug/1495423
-        iniset /$Q_PLUGIN_CONF_FILE agent minimize_polling False
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" agent minimize_polling False
 
         # Set "physical" mapping
-        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE"
 
         # XEN_INTEGRATION_BRIDGE is the integration bridge in dom0
-        iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $XEN_INTEGRATION_BRIDGE
+        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $XEN_INTEGRATION_BRIDGE
+
+        # Set OVS native interface for ovs-agent in compute node
+        XEN_DOM0_IP=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3)
+        iniset /$Q_PLUGIN_CONF_FILE ovs ovsdb_connection tcp:$XEN_DOM0_IP:6640
+        iniset /$Q_PLUGIN_CONF_FILE ovs of_listen_address $HOST_IP
 
         # Set up domU's L2 agent:
 
@@ -107,11 +112,11 @@
         sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE
 
         # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT"
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$VLAN_INTERFACE,physnet-ex:$PUBLIC_BRIDGE"
+        iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:br-$VLAN_INTERFACE,physnet-ex:$PUBLIC_BRIDGE"
         # Set integration bridge to domU's
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE
+        iniset /$Q_PLUGIN_CONF_FILE ovs integration_bridge $OVS_BRIDGE
         # Set root wrap
-        iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND"
+        iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
     fi
     iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES
     iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index f6d10ea..62a4d00 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -30,7 +30,7 @@
 
 function _neutron_ovs_base_setup_bridge {
     local bridge=$1
-    neutron-ovs-cleanup
+    neutron-ovs-cleanup --config-file $NEUTRON_CONF
     _neutron_ovs_base_add_bridge $bridge
     sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge
 }
@@ -69,7 +69,11 @@
         restart_service openvswitch
         sudo systemctl enable openvswitch
     elif is_suse; then
-        restart_service openvswitch-switch
+        if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
+            restart_service openvswitch-switch
+        else
+            restart_service openvswitch
+        fi
     fi
 }
 
@@ -83,9 +87,10 @@
 
 function _neutron_ovs_base_configure_firewall_driver {
     if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_hybrid
+        enable_kernel_bridge_firewall
     else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
+        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver noop
     fi
 }
 
@@ -96,7 +101,7 @@
         iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
     fi
 
-    neutron-ovs-cleanup
+    neutron-ovs-cleanup --config-file $NEUTRON_CONF
     if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then
         ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 ||
         sudo ip link add $Q_PUBLIC_VETH_INT type veth \
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index 14928fb..cd0c1ed 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -22,6 +22,9 @@
 # used.
 Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE=${Q_ASSIGN_GATEWAY_TO_PUBLIC_BRIDGE:-True}
 
+# The name of the default router
+Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1}
+
 # If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of
 # PUBLIC_BRIDGE.  This is intended to be used with
 # Q_USE_PROVIDERNET_FOR_PUBLIC=True.
@@ -67,7 +70,10 @@
 IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac}
 IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet}
 IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet}
-FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64}
+IPV6_ADDRS_SAFE_TO_USE=${IPV6_ADDRS_SAFE_TO_USE:-fd$IPV6_GLOBAL_ID::/56}
+# if the safe-to-use range is larger than a /64, we only use the first /64 to
+# avoid the side effects outlined in RFC 7421
+FIXED_RANGE_V6=${FIXED_RANGE_V6:-$(echo $IPV6_ADDRS_SAFE_TO_USE | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }')}
 IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-}
 IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64}
 IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2}
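A sketch of the /64 clamping performed for FIXED_RANGE_V6 above: a /56 safe-to-use range is narrowed to its first /64, while a range that is already /64 or smaller is kept as-is (the prefixes are illustrative):

    clamp() { echo "$1" | awk -F '/' '{ print $1"/"($2>63 ? $2 : 64) }'; }
    clamp "fd12:67f5:96c2::/56"    # prints: fd12:67f5:96c2::/64
    clamp "fd12:67f5:96c2::/72"    # prints: fd12:67f5:96c2::/72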
@@ -80,12 +86,13 @@
 PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"}
 
 # Subnetpool defaults
+USE_SUBNETPOOL=${USE_SUBNETPOOL:-True}
 SUBNETPOOL_NAME=${SUBNETPOOL_NAME:-"shared-default-subnetpool"}
 
-SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-10.0.0.0/8}
-SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-2001:db8:8000::/48}
+SUBNETPOOL_PREFIX_V4=${SUBNETPOOL_PREFIX_V4:-$IPV4_ADDRS_SAFE_TO_USE}
+SUBNETPOOL_PREFIX_V6=${SUBNETPOOL_PREFIX_V6:-$IPV6_ADDRS_SAFE_TO_USE}
 
-SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-24}
+SUBNETPOOL_SIZE_V4=${SUBNETPOOL_SIZE_V4:-26}
 SUBNETPOOL_SIZE_V6=${SUBNETPOOL_SIZE_V6:-64}
 
 default_v4_route_devs=$(ip -4 route | grep ^default | awk '{print $5}')
@@ -150,14 +157,6 @@
 }
 
 function create_neutron_initial_network {
-    if ! is_service_enabled q-svc && ! is_service_enabled neutron-api; then
-        echo "Controller services not enabled. No networks configured!"
-        return
-    fi
-    if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "False" ]]; then
-        echo "Network creation disabled!"
-        return
-    fi
     local project_id
     project_id=$(openstack project list | grep " demo " | get_field 1)
     die_if_not_set $LINENO project_id "Failure retrieving project_id for demo"
@@ -168,25 +167,27 @@
     fi
 
     if is_networking_extension_supported "auto-allocated-topology"; then
-        if [[ "$IP_VERSION" =~ 4.* ]]; then
-            SUBNETPOOL_V4_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --shared --is-default=True | grep ' id ' | get_field 2)
-        fi
-        if [[ "$IP_VERSION" =~ .*6 ]]; then
-            SUBNETPOOL_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnetpool-create $SUBNETPOOL_NAME --default-prefixlen $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --shared --is-default=True | grep ' id ' | get_field 2)
+        if [[ "$USE_SUBNETPOOL" == "True" ]]; then
+            if [[ "$IP_VERSION" =~ 4.* ]]; then
+                SUBNETPOOL_V4_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V4 --pool-prefix $SUBNETPOOL_PREFIX_V4 --share --default | grep ' id ' | get_field 2)
+            fi
+            if [[ "$IP_VERSION" =~ .*6 ]]; then
+                SUBNETPOOL_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet pool create $SUBNETPOOL_NAME --default-prefix-length $SUBNETPOOL_SIZE_V6 --pool-prefix $SUBNETPOOL_PREFIX_V6 --share --default | grep ' id ' | get_field 2)
+            fi
         fi
     fi
 
     if is_provider_network; then
         die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
         die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
-        NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create $PHYSICAL_NETWORK --tenant_id $project_id --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --project $project_id --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $project_id"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
             if [ -z $SUBNETPOOL_V4_ID ]; then
                 fixed_range_v4=$FIXED_RANGE
             fi
-            SUBNET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnetpool $SUBNETPOOL_V4_ID} $NET_ID $fixed_range_v4 | grep ' id ' | get_field 2)
+            SUBNET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID --subnet-range $fixed_range_v4 | grep ' id ' | get_field 2)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $project_id"
         fi
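The id-extraction idiom used throughout this function pipes the client's table output through grep and get_field; a rough standalone illustration of what that does to one made-up table row, using awk in place of DevStack's get_field helper:

    row='| id | 4e8e5957-649f-477b-9e5b-f1f75b21c03c |'
    echo "$row" | grep ' id ' | awk -F '|' '{ gsub(/ /, "", $3); print $3 }'
    # prints: 4e8e5957-649f-477b-9e5b-f1f75b21c03c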
 
@@ -196,7 +197,7 @@
             if [ -z $SUBNETPOOL_V6_ID ]; then
                 fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
             fi
-            SUBNET_V6_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create --tenant_id $project_id --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY --name $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnetpool $SUBNETPOOL_V6_ID} $NET_ID $fixed_range_v6 | grep 'id' | get_field 2)
+            SUBNET_V6_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create --project $project_id --ip-version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID $fixed_range_v6 | grep 'id' | get_field 2)
             die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $IPV6_PROVIDER_SUBNET_NAME $project_id"
         fi
 
@@ -206,7 +207,7 @@
             sudo ip link set $PUBLIC_INTERFACE up
         fi
     else
-        NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create --tenant-id $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create --project $project_id "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $project_id"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -224,23 +225,23 @@
         # Create a router, and add the private subnet as one of its interfaces
         if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
             # create a tenant-owned router.
-            ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create --tenant-id $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create --project $project_id $Q_ROUTER_NAME | grep ' id ' | get_field 2)
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $project_id $Q_ROUTER_NAME"
         else
             # Plugin only supports creating a single router, which should be admin owned.
-            ROUTER_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
             die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME"
         fi
 
-        EXTERNAL_NETWORK_FLAGS="--router:external"
+        EXTERNAL_NETWORK_FLAGS="--external"
         if is_networking_extension_supported "auto-allocated-topology"; then
-            EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --is-default"
+            EXTERNAL_NETWORK_FLAGS="$EXTERNAL_NETWORK_FLAGS --default"
         fi
         # Create an external network, and a subnet. Configure the external network as router gw
         if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
-            EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type flat --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
         else
-            EXT_NET_ID=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" net-create "$PUBLIC_NETWORK_NAME" -- $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
         fi
         die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
 
@@ -262,16 +263,16 @@
     if [ -z $SUBNETPOOL_V4_ID ]; then
         fixed_range_v4=$FIXED_RANGE
     fi
-    local subnet_params="--tenant-id $project_id "
-    subnet_params+="--ip_version 4 "
+    local subnet_params="--project $project_id "
+    subnet_params+="--ip-version 4 "
     if [[ -n "$NETWORK_GATEWAY" ]]; then
         subnet_params+="--gateway $NETWORK_GATEWAY "
     fi
-    subnet_params+="--name $PRIVATE_SUBNET_NAME "
-    subnet_params+="${SUBNETPOOL_V4_ID:+--subnetpool $SUBNETPOOL_V4_ID} "
-    subnet_params+="$NET_ID $fixed_range_v4"
+    subnet_params+="${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} "
+    subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
+    subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
     local subnet_id
-    subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2)
+    subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $project_id"
     echo $subnet_id
 }
@@ -285,52 +286,50 @@
     if [ -z $SUBNETPOOL_V6_ID ]; then
         fixed_range_v6=$FIXED_RANGE_V6
     fi
-    local subnet_params="--tenant-id $project_id "
-    subnet_params+="--ip_version 6 "
+    local subnet_params="--project $project_id "
+    subnet_params+="--ip-version 6 "
     if [[ -n "$IPV6_PRIVATE_NETWORK_GATEWAY" ]]; then
         subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY "
     fi
-    subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME "
-    subnet_params+="${SUBNETPOOL_V6_ID:+--subnetpool $SUBNETPOOL_V6_ID} "
-    subnet_params+="$NET_ID $fixed_range_v6 $ipv6_modes"
+    subnet_params+="${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} "
+    subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6 $ipv6_modes} "
+    subnet_params+="--network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
     local ipv6_subnet_id
-    ipv6_subnet_id=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep ' id ' | get_field 2)
+    ipv6_subnet_id=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
     die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $project_id"
     echo $ipv6_subnet_id
 }
 
 # Create public IPv4 subnet
 function _neutron_create_public_subnet_v4 {
-    local subnet_params="--ip_version 4 "
+    local subnet_params="--ip-version 4 "
     subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} "
     if [[ -n "$PUBLIC_NETWORK_GATEWAY" ]]; then
         subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY "
     fi
-    subnet_params+="--name $PUBLIC_SUBNET_NAME "
-    subnet_params+="$EXT_NET_ID $FLOATING_RANGE "
-    subnet_params+="-- --enable_dhcp=False"
+    subnet_params+="--network $EXT_NET_ID --subnet-range $FLOATING_RANGE --no-dhcp "
+    subnet_params+="$PUBLIC_SUBNET_NAME"
     local id_and_ext_gw_ip
-    id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet"
     echo $id_and_ext_gw_ip
 }
 
 # Create public IPv6 subnet
 function _neutron_create_public_subnet_v6 {
-    local subnet_params="--ip_version 6 "
+    local subnet_params="--ip-version 6 "
     subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY "
-    subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME "
-    subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE "
-    subnet_params+="-- --enable_dhcp=False"
+    subnet_params+="--network $EXT_NET_ID --subnet-range $IPV6_PUBLIC_RANGE --no-dhcp "
+    subnet_params+="$IPV6_PUBLIC_SUBNET_NAME"
     local ipv6_id_and_ext_gw_ip
-    ipv6_id_and_ext_gw_ip=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ')
+    ipv6_id_and_ext_gw_ip=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" subnet create $subnet_params | grep -e 'gateway_ip' -e ' id ')
     die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet"
     echo $ipv6_id_and_ext_gw_ip
 }
 
 # Configure neutron router for IPv4 public access
 function _neutron_configure_router_v4 {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $SUBNET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $SUBNET_ID
     # Create a public subnet on the external network
     local id_and_ext_gw_ip
     id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID)
@@ -338,7 +337,7 @@
     ext_gw_ip=$(echo $id_and_ext_gw_ip  | get_field 2)
     PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5)
     # Configure the external network as the default router gateway
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
 
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3 || is_service_enabled neutron-l3;  then
@@ -365,13 +364,8 @@
                 sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface
                 sudo ip link set $ext_gw_interface up
             fi
-            ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+            ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' --device-owner network:router_gateway | awk -F'ip_address'  '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
             die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP"
-            local replace_range=${SUBNETPOOL_PREFIX_V4}
-            if [[ -z "${SUBNETPOOL_V4_ID}" ]]; then
-                replace_range=${FIXED_RANGE}
-            fi
-            sudo ip route replace $replace_range via $ROUTER_GW_IP
         fi
         _neutron_set_router_id
     fi
@@ -379,7 +373,7 @@
 
 # Configure neutron router for IPv6 public access
 function _neutron_configure_router_v6 {
-    neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-interface-add $ROUTER_ID $IPV6_SUBNET_ID
+    openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router add subnet $ROUTER_ID $IPV6_SUBNET_ID
     # Create a public subnet on the external network
     local ipv6_id_and_ext_gw_ip
     ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID)
@@ -391,7 +385,7 @@
     # If the external network has not already been set as the default router
     # gateway when configuring an IPv4 public subnet, do so now
     if [[ "$IP_VERSION" == "6" ]]; then
-        neutron --os-cloud devstack-admin --os-region "$REGION_NAME" router-gateway-set $ROUTER_ID $EXT_NET_ID
+        openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
     fi
 
     # This logic is specific to using the l3-agent for layer 3
@@ -401,13 +395,16 @@
         # IPv6-only clouds in the gate. Please do not remove this without
         # talking to folks in Infra.
         for d in $default_v6_route_devs; do
-            sudo sysctl -w net.ipv6.conf.$d.accept_ra=2
+            # Use slashes rather than dots in this sysctl key: route device
+            # names can contain dots, and with the dotted syntax those dots
+            # would be parsed as key separators and the command would fail.
+            sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
         done
         # Ensure IPv6 forwarding is enabled on the host
         sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
-        IPV6_ROUTER_GW_IP=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' ')
+        IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
         die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP"
 
         if is_neutron_ovs_base_plugin; then
@@ -427,16 +424,9 @@
     fi
 }
 
-function is_provider_network {
-    if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ]; then
-        return 0
-    fi
-    return 1
-}
-
 function is_networking_extension_supported {
     local extension=$1
     # TODO(sc68cal) cache this instead of calling every time
-    EXT_LIST=$(neutron --os-cloud devstack-admin --os-region "$REGION_NAME" ext-list -c alias -f value)
+    EXT_LIST=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" extension list --network -c Alias -f value)
     [[ $EXT_LIST =~ $extension ]] && return 0
 }
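The subnet and network hunks above all apply the same neutron-to-openstackclient flag mapping: --tenant-id/--tenant_id becomes --project, --ip_version becomes --ip-version, --subnetpool becomes --subnet-pool, and the old positional NETWORK CIDR pair becomes --network plus --subnet-range, leaving the subnet name as the only positional argument. A minimal sketch of the conversion, with placeholder values rather than the script's variables:

    # old python-neutronclient call
    neutron subnet-create --tenant_id $PROJECT_ID --ip_version 4 \
        --name private-subnet --gateway 10.0.0.1 $NET_ID 10.0.0.0/24

    # equivalent python-openstackclient call
    openstack subnet create --project $PROJECT_ID --ip-version 4 \
        --gateway 10.0.0.1 --network $NET_ID --subnet-range 10.0.0.0/24 \
        private-subnet

The accept_ra change in the same file swaps dots for slashes in the sysctl key; a rough illustration with a hypothetical VLAN interface name shows why:

    # dotted form: the dot inside "br-ex.100" is parsed as a key separator, so the key is not found
    sudo sysctl -w net.ipv6.conf.br-ex.100.accept_ra=2
    # slash form: dots in the device name stay literal
    sudo sysctl -w net/ipv6/conf/br-ex.100/accept_ra=2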
diff --git a/lib/nova b/lib/nova
index 334cba6..d5db5ea 100644
--- a/lib/nova
+++ b/lib/nova
@@ -85,9 +85,6 @@
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
 
-# Option to initialize CellsV2 environment
-NOVA_CONFIGURE_CELLSV2=$(trueorfalse False NOVA_CONFIGURE_CELLSV2)
-
 # Nova supports pluggable schedulers.  The default ``FilterScheduler``
 # should work in most cases.
 SCHEDULER=${SCHEDULER:-filter_scheduler}
@@ -302,8 +299,6 @@
     # Put config files in ``/etc/nova`` for everyone to find
     sudo install -d -o $STACK_USER $NOVA_CONF_DIR
 
-    install_default_policy nova
-
     configure_rootwrap nova
 
     if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then
@@ -683,9 +678,7 @@
     if is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-api; then
         # (Re)create nova databases
         recreate_database nova
-        if [ "$NOVA_CONFIGURE_CELLSV2" != "False" ]; then
-            recreate_database nova_api_cell0
-        fi
+        recreate_database nova_api_cell0
 
         # Migrate nova database. If "nova-manage cell_v2 simple_cell_setup" has
         # been run this migrates the "nova" and "nova_api_cell0" database.
@@ -829,6 +822,8 @@
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LIBVIRT_GROUP
     elif [[ "$VIRT_DRIVER" = 'lxd' ]]; then
         run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $LXD_GROUP
+    elif [[ "$VIRT_DRIVER" = 'docker' ]]; then
+        run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" $DOCKER_GROUP
     elif [[ "$VIRT_DRIVER" = 'fake' ]]; then
         local i
         for i in `seq 1 $NUMBER_FAKE_NOVA_COMPUTE`; do
@@ -866,9 +861,13 @@
     run_process n-cond "$NOVA_BIN_DIR/nova-conductor --config-file $compute_cell_conf"
     run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
     run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
-
     run_process n-crt "$NOVA_BIN_DIR/nova-cert --config-file $api_cell_conf"
+
+    if is_service_enabled n-net; then
+        enable_kernel_bridge_firewall
+    fi
     run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
     run_process n-api-meta "$NOVA_BIN_DIR/nova-api-metadata --config-file $compute_cell_conf"
 
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 6b7c7c2..5e7695a 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -23,12 +23,7 @@
 # Installs required distro-specific libvirt packages.
 function install_libvirt {
     if is_ubuntu; then
-        if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then
-            install_package qemu-system
-        else
-            install_package qemu-kvm
-            install_package libguestfs0
-        fi
+        install_package qemu-system
         install_package libvirt-bin libvirt-dev
         pip_install_gr libvirt-python
         if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index c40427c..7ffd14d 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -45,11 +45,13 @@
     iniset $NOVA_CONF DEFAULT ram_allocation_ratio 1.0
     iniset $NOVA_CONF DEFAULT reserved_host_memory_mb 0
     # ironic section
-    iniset $NOVA_CONF ironic admin_username admin
-    iniset $NOVA_CONF ironic admin_password $ADMIN_PASSWORD
-    iniset $NOVA_CONF ironic admin_url $KEYSTONE_AUTH_URI/v2.0
-    iniset $NOVA_CONF ironic admin_tenant_name demo
-    iniset $NOVA_CONF ironic api_endpoint $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT/v1
+    iniset $NOVA_CONF ironic auth_type password
+    iniset $NOVA_CONF ironic username admin
+    iniset $NOVA_CONF ironic password $ADMIN_PASSWORD
+    iniset $NOVA_CONF ironic auth_url $KEYSTONE_AUTH_URI/v3
+    iniset $NOVA_CONF ironic project_domain_id default
+    iniset $NOVA_CONF ironic user_domain_id default
+    iniset $NOVA_CONF ironic project_name demo
 }
 
 # install_nova_hypervisor() - Install external components
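The [ironic] section switches from the old admin_* options to a standard Keystone v3 password auth plugin. For reference, the iniset calls above produce roughly this section in nova.conf, with the source variables shown as placeholders (values are simply the DevStack defaults implied by the diff, not a recommendation):

    [ironic]
    auth_type = password
    username = admin
    password = ${ADMIN_PASSWORD}
    auth_url = ${KEYSTONE_AUTH_URI}/v3
    project_domain_id = default
    user_domain_id = default
    project_name = demo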
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index b4eb3c1..f3c8add 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -40,7 +40,8 @@
     configure_libvirt
     iniset $NOVA_CONF libvirt virt_type "$LIBVIRT_TYPE"
     iniset $NOVA_CONF libvirt cpu_mode "none"
-    iniset $NOVA_CONF libvirt use_usb_tablet "False"
+    # Do not enable USB tablet input devices to avoid QEMU CPU overhead.
+    iniset $NOVA_CONF DEFAULT pointer_model "ps2mouse"
     iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
     iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
     iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
@@ -104,6 +105,16 @@
     if [[ "$ENABLE_FILE_INJECTION" == "True" ]] ; then
         if is_ubuntu; then
             install_package python-guestfs
+            # NOTE(andreaf) On Ubuntu the kernel image is readable only by root,
+            # which breaks libguestfs: https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725
+            INSTALLED_KERNELS="$(ls /boot/vmlinuz-*)"
+            for kernel in $INSTALLED_KERNELS; do
+                STAT_OVERRIDE="root root 644 ${kernel}"
+                # unstack won't remove the statoverride, so make this idempotent
+                if [[ ! $(dpkg-statoverride --list | grep "$STAT_OVERRIDE") ]]; then
+                    sudo dpkg-statoverride --add --update $STAT_OVERRIDE
+                fi
+            done
         elif is_fedora || is_suse; then
             install_package python-libguestfs
         fi
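The dpkg-statoverride loop above is guarded because adding the same override twice fails, and unstack.sh never removes it. If the override needs to be undone by hand, a sketch along these lines should work (the original kernel mode on Ubuntu is normally 0600, which is exactly what triggers the libguestfs bug referenced above):

    # list the overrides that were added for kernel images
    dpkg-statoverride --list | grep /boot/vmlinuz- || true

    # drop one override and restore the restrictive default mode
    sudo dpkg-statoverride --remove /boot/vmlinuz-$(uname -r)
    sudo chmod 0600 /boot/vmlinuz-$(uname -r)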
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index e5d25da..b053856 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -90,6 +90,19 @@
         echo "install_conntrack_tools"
     } | $ssh_dom0
 
+    if is_service_enabled neutron; then
+        # Remove restriction on linux bridge in Dom0 when neutron is enabled
+        $ssh_dom0 "rm -f /etc/modprobe.d/blacklist-bridge*"
+
+        count=`$ssh_dom0 "iptables -t filter -L XenServerDevstack |wc -l"`
+        if [ "$count" = "0" ]; then
+        {
+            echo "iptables -t filter --new XenServerDevstack"
+            echo "iptables -t filter -I INPUT -j XenServerDevstack"
+            echo "iptables -t filter -I XenServerDevstack -p tcp --dport 6640 -j ACCEPT"
+        } | $ssh_dom0
+        fi
+    fi
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/placement b/lib/placement
index 165c670..93b72eb 100644
--- a/lib/placement
+++ b/lib/placement
@@ -47,7 +47,6 @@
 # Public facing bits
 PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST}
-PLACEMENT_SERVICE_PORT=${PLACEMENT_SERVICE_PORT:-8778}
 
 # Functions
 # ---------
@@ -55,7 +54,7 @@
 # Test if any placement services are enabled
 # is_placement_enabled
 function is_placement_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"placement-" ]] && return 0
+    [[ ,${ENABLED_SERVICES} =~ ,"placement-api" ]] && return 0
     return 1
 }
 
@@ -68,7 +67,6 @@
 # _config_placement_apache_wsgi() - Set WSGI config files
 function _config_placement_apache_wsgi {
     local placement_api_apache_conf
-    local placement_api_port=$PLACEMENT_SERVICE_PORT
     local venv_path=""
     local nova_bin_dir=""
     nova_bin_dir=$(get_python_exec_prefix)
@@ -89,7 +87,6 @@
 
     sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
     sudo sed -e "
-        s|%PUBLICPORT%|$placement_api_port|g;
         s|%APACHE_NAME%|$APACHE_NAME|g;
         s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g;
         s|%SSLENGINE%|$placement_ssl|g;
@@ -101,12 +98,7 @@
     " -i $placement_api_apache_conf
 }
 
-# configure_placement() - Set config files, create data dirs, etc
-function configure_placement {
-    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
-        iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
-    fi
-
+function configure_placement_nova_compute {
     iniset $NOVA_CONF placement auth_type "password"
     iniset $NOVA_CONF placement auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3"
     iniset $NOVA_CONF placement username placement
@@ -121,7 +113,13 @@
     # established by the nova api. This avoids, for the time being,
     # creating redundant configuration items that are just used for
     # testing.
+}
 
+# configure_placement() - Set config files, create data dirs, etc
+function configure_placement {
+    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
+        iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
+    fi
     _config_placement_apache_wsgi
 }
 
@@ -160,10 +158,6 @@
 
 # start_placement_api() - Start the API processes ahead of other things
 function start_placement_api {
-    # Get right service port for testing
-    local service_port=$PLACEMENT_SERVICE_PORT
-    local placement_api_port=$PLACEMENT_SERVICE_PORT
-
     enable_apache_site placement-api
     restart_apache_server
     tail_log placement-api /var/log/$APACHE_NAME/placement-api.log
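With configure_placement_nova_compute split out of configure_placement, a host that runs only nova-compute can get the [placement] credentials written into its nova.conf without running the placement API itself; the placement-client pseudo-service (enabled by default in stackrc further down) is how such a host signals that. A rough local.conf sketch for a hypothetical compute-only subnode, not a complete multi-node recipe:

    [[local|localrc]]
    # placement-client makes stack.sh call configure_placement_nova_compute
    # even though placement-api runs on the controller
    ENABLED_SERVICES=n-cpu,q-agt,placement-client
    SERVICE_HOST=<controller-ip>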
diff --git a/lib/swift b/lib/swift
index f9ea028..b175f2e 100644
--- a/lib/swift
+++ b/lib/swift
@@ -397,6 +397,9 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120
     iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20
 
+    # Versioned Writes
+    iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true
+
     # Configure Ceilometer
     if is_service_enabled ceilometer; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
@@ -489,8 +492,6 @@
         generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container
         iniuncomment ${swift_node_config} DEFAULT bind_ip
         iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS}
-        iniuncomment ${swift_node_config} app:container-server allow_versions
-        iniset ${swift_node_config} app:container-server allow_versions  "true"
 
         swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf
         cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config}
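The proxy-side allow_versioned_writes option replaces the per-node container-server allow_versions setting removed in the following hunk; clients still opt in per container. A short example assuming the python-swiftclient CLI:

    # container that will hold the old object versions
    swift post versions
    # enable versioning on the working container
    swift post -H 'X-Versions-Location: versions' my-container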
diff --git a/lib/tempest b/lib/tempest
index b491bf8..6dc83b5 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -15,7 +15,6 @@
 #   - ``SERVICE_HOST``
 #   - ``BASE_SQL_CONN`` ``lib/database`` declares
 #   - ``PUBLIC_NETWORK_NAME``
-#   - ``Q_ROUTER_NAME``
 #   - ``VIRT_DRIVER``
 #   - ``LIBVIRT_TYPE``
 #   - ``KEYSTONE_SERVICE_PROTOCOL``, ``KEYSTONE_SERVICE_HOST`` from lib/keystone
@@ -194,11 +193,11 @@
         available_flavors=$(nova flavor-list)
         if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
-                nova flavor-create m1.nano 42 64 0 1
+                openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano
             fi
             flavor_ref=42
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
-                nova flavor-create m1.micro 84 128 0 1
+                openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro
             fi
             flavor_ref_alt=84
         else
@@ -243,8 +242,7 @@
     # the public network (for floating ip access) is only available
     # if the extension is enabled.
     if is_networking_extension_supported 'external-net'; then
-        public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \
-            awk '{print $2}')
+        public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
     fi
 
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -268,17 +266,18 @@
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
         iniset $TEMPEST_CONFIG auth admin_username $admin_username
         iniset $TEMPEST_CONFIG auth admin_password "$password"
-        iniset $TEMPEST_CONFIG auth admin_tenant_name $admin_project_name
-        iniset $TEMPEST_CONFIG auth admin_tenant_id $admin_project_id
+        iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name
         iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name
     fi
-    if [ "$ENABLE_IDENTITY_V2" == "False" ]; then
-        # Only Identity v3 is available; then skip Identity API v2 tests
-        iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
-        # In addition, use v3 auth tokens for running all Tempest tests
-        iniset $TEMPEST_CONFIG identity auth_version v3
-    else
+    if [ "$ENABLE_IDENTITY_V2" == "True" ]; then
+        # Run Identity API v2 tests ONLY if needed
+        iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True
         iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2}
+    else
+        # Skip Identity API v2 tests by default
+        iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
+        # Use v3 auth tokens for running all Tempest tests
+        iniset $TEMPEST_CONFIG identity auth_version v3
     fi
 
     if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then
@@ -297,7 +296,6 @@
     fi
     if [ "$VIRT_DRIVER" = "xenserver" ]; then
         iniset $TEMPEST_CONFIG image disk_formats "ami,ari,aki,vhd,raw,iso"
-        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
     fi
 
     # Image Features
@@ -307,17 +305,12 @@
     fi
 
     # Compute
-    iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute image_ref $image_uuid
     iniset $TEMPEST_CONFIG compute image_ref_alt $image_uuid_alt
-    iniset $TEMPEST_CONFIG compute image_alt_ssh_user ${ALT_INSTANCE_USER:-cirros}
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
-    iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method
-    # set the equiv validation option here as well to ensure they are
-    # in sync. They shouldn't be separate options.
     iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
-    if [[ ! $(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then
+    if ! is_service_enabled n-cell && ! is_service_enabled neutron; then
         iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
     fi
 
@@ -388,14 +381,10 @@
     iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6 "$IPV6_ENABLED"
     iniset $TEMPEST_CONFIG network-feature-enabled ipv6_subnet_attributes "$IPV6_SUBNET_ATTRIBUTES_ENABLED"
+    iniset $TEMPEST_CONFIG network-feature-enabled port_security $NEUTRON_PORT_SECURITY
 
     # Orchestration Tests
     if is_service_enabled heat; then
-        # Though this is not needed by heat, some tempest tests explicitly
-        # try to set this role. Removing them from the tempest tests breaks
-        # some non-devstack CIs.
-        get_or_create_role "heat_stack_owner"
-
         if [[ ! -z "$HEAT_CFN_IMAGE_URL" ]]; then
             iniset $TEMPEST_CONFIG orchestration image_ref $(basename "${HEAT_CFN_IMAGE_URL%.*}")
         fi
@@ -404,31 +393,41 @@
             # build a specialized heat flavor
             available_flavors=$(nova flavor-list)
             if [[ ! ( $available_flavors =~ 'm1.heat' ) ]]; then
-                nova flavor-create m1.heat 451 512 0 1
+                openstack flavor create --id 451 --ram 512 --disk 0 --vcpus 1 m1.heat
             fi
             iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat"
         fi
         iniset $TEMPEST_CONFIG orchestration build_timeout 900
-        iniset $TEMPEST_CONFIG orchestration stack_owner_role "heat_stack_owner"
+        iniset $TEMPEST_CONFIG orchestration stack_owner_role Member
     fi
 
     # Scenario
-    SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+    if [ "$VIRT_DRIVER" = "xenserver" ]; then
+        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
+        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.vhd.tgz"
+        iniset $TEMPEST_CONFIG scenario img_disk_format vhd
+        iniset $TEMPEST_CONFIG scenario img_container_format ovf
+    else
+        SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec}
+        SCENARIO_IMAGE_FILE="cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
+    fi
     iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR
+    iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_FILE
     iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img"
     iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd"
     iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz"
-    iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img"
 
-    # Telemetry
-    iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True"
-
+    # If using provider networking, use the physical network for validation rather than private
+    TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
+    if is_provider_network; then
+        TEMPEST_SSH_NETWORK_NAME=$PHYSICAL_NETWORK
+    fi
     # Validation
     iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False}
     iniset $TEMPEST_CONFIG validation ip_version_for_ssh 4
     iniset $TEMPEST_CONFIG validation ssh_timeout $BUILD_TIMEOUT
     iniset $TEMPEST_CONFIG validation image_ssh_user ${DEFAULT_INSTANCE_USER:-cirros}
-    iniset $TEMPEST_CONFIG validation network_for_ssh $PRIVATE_NETWORK_NAME
+    iniset $TEMPEST_CONFIG validation network_for_ssh $TEMPEST_SSH_NETWORK_NAME
 
     # Volume
     # TODO(obutenko): Remove snapshot_backup when liberty-eol happens.
diff --git a/lib/tls b/lib/tls
index 2443d7d..57b5e52 100644
--- a/lib/tls
+++ b/lib/tls
@@ -201,7 +201,6 @@
 # Create root and intermediate CAs
 # init_CA
 function init_CA {
-    fix_system_ca_bundle_path
     # Ensure CAs are built
     make_root_CA $ROOT_CA_DIR
     make_int_CA $INT_CA_DIR $ROOT_CA_DIR
@@ -226,7 +225,7 @@
     if [[ ! -r $DEVSTACK_CERT ]]; then
         if [[ -n "$TLS_IP" ]]; then
             # Lie to let incomplete match routines work
-            TLS_IP="DNS:$TLS_IP"
+            TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
         fi
         make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
 
@@ -249,6 +248,9 @@
         else
             alt_names="$alt_names,DNS:$SERVICE_HOST"
         fi
+        if is_ipv4_address "$SERVICE_HOST" ; then
+            alt_names="$alt_names,IP:$SERVICE_HOST"
+        fi
     fi
 
     # Only generate the certificate if it doesn't exist yet on the disk
@@ -440,6 +442,52 @@
 # Proxy Functions
 # ===============
 
+function tune_apache_connections {
+    local tuning_file=$APACHE_SETTINGS_DIR/connection-tuning.conf
+    if ! [ -f $tuning_file ] ; then
+        sudo bash -c "cat > $tuning_file" << EOF
+# worker MPM
+# StartServers: initial number of server processes to start
+# MinSpareThreads: minimum number of worker threads which are kept spare
+# MaxSpareThreads: maximum number of worker threads which are kept spare
+# ThreadLimit: ThreadsPerChild can be changed to this maximum value during a
+#              graceful restart. ThreadLimit can only be changed by stopping
+#              and starting Apache.
+# ThreadsPerChild: constant number of worker threads in each server process
+# MaxClients: maximum number of simultaneous client connections
+# MaxRequestsPerChild: maximum number of requests a server process serves
+#
+# The apache defaults are too conservative if we want reliable tempest
+# testing. Bump these values up from ~400 max clients to 1024 max clients.
+<IfModule mpm_worker_module>
+# Note that the next three conf values must be changed together.
+# MaxClients = ServerLimit * ThreadsPerChild
+ServerLimit          32
+ThreadsPerChild      32
+MaxClients         1024
+StartServers          3
+MinSpareThreads      96
+MaxSpareThreads     192
+ThreadLimit          64
+MaxRequestsPerChild   0
+</IfModule>
+<IfModule mpm_event_module>
+# Note that the next three conf values must be changed together.
+# MaxClients = ServerLimit * ThreadsPerChild
+ServerLimit          32
+ThreadsPerChild      32
+MaxClients         1024
+StartServers          3
+MinSpareThreads      96
+MaxSpareThreads     192
+ThreadLimit          64
+MaxRequestsPerChild   0
+</IfModule>
+EOF
+        restart_apache_server
+    fi
+}
+
 # Starts the TLS proxy for the given IP/ports
 # start_tls_proxy service-name front-host front-port back-host back-port
 function start_tls_proxy {
@@ -449,6 +497,8 @@
     local b_host=$4
     local b_port=$5
 
+    tune_apache_connections
+
     local config_file
     config_file=$(apache_site_config_for $b_service)
     local listen_string
@@ -473,6 +523,11 @@
         ProxyPass http://$b_host:$b_port/ retry=5 nocanon
         ProxyPassReverse http://$b_host:$b_port/
     </Location>
+    ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
+    ErrorLogFormat "[%{u}t] [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
+    LogLevel info
+    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log common
+    LogFormat "%v %h %l %u %t \"%r\" %>s %b"
 </VirtualHost>
 EOF
     for mod in ssl proxy proxy_http; do
@@ -487,6 +542,13 @@
     reload_apache_server
 }
 
+# Follow TLS proxy
+function follow_tls_proxy {
+    sudo touch /var/log/$APACHE_NAME/tls-proxy_error.log
+    tail_log tls-error /var/log/$APACHE_NAME/tls-proxy_error.log
+    sudo touch /var/log/$APACHE_NAME/tls-proxy_access.log
+    tail_log tls-proxy /var/log/$APACHE_NAME/tls-proxy_access.log
+}
 
 # Cleanup Functions
 # =================
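tune_apache_connections writes both an mpm_worker and an mpm_event stanza because either MPM may be the active one depending on distro and Apache version; only the block whose module is loaded takes effect, and in both cases MaxClients works out to ServerLimit * ThreadsPerChild = 32 * 32 = 1024. A quick way to confirm which block applies (the control command is apachectl on some distros and apache2ctl on others):

    # print the MPM loaded into the running Apache
    apachectl -V 2>/dev/null | grep -i 'server mpm' || apache2ctl -V | grep -i 'server mpm'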
diff --git a/openrc b/openrc
index 8d8ae8b..d1c6129 100644
--- a/openrc
+++ b/openrc
@@ -81,12 +81,12 @@
 KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST}
 
 # Identity API version
-export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
+export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
 
 # Authenticating against an OpenStack cloud using Keystone returns a **Token**
 # and **Service Catalog**.  The catalog contains the endpoints for all services
 # the user/project has access to - including nova, glance, keystone, swift, ...
-# We currently recommend using the 2.0 *identity api*.
+# We currently recommend using the version 3 *identity api*.
 #
 export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION}
 
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
index 856eaff..fefd454 100755
--- a/pkg/elasticsearch.sh
+++ b/pkg/elasticsearch.sh
@@ -83,7 +83,7 @@
         return
     fi
     if is_ubuntu; then
-        is_package_installed openjdk-7-jre-headless || install_package openjdk-7-jre-headless
+        is_package_installed default-jre-headless || install_package default-jre-headless
 
         sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
         sudo update-rc.d elasticsearch defaults 95 10
diff --git a/samples/local.sh b/samples/local.sh
index 634f6dd..9cd0bdc 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -36,7 +36,7 @@
     # Add first keypair found in localhost:$HOME/.ssh
     for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
         if [[ -r $i ]]; then
-            nova keypair-add --pub_key=$i `hostname`
+            openstack keypair create --public-key $i `hostname`
             break
         fi
     done
@@ -53,8 +53,8 @@
     MI_NAME=m1.micro
 
     # Create micro flavor if not present
-    if [[ -z $(nova flavor-list | grep $MI_NAME) ]]; then
-        nova flavor-create $MI_NAME 6 128 0 1
+    if [[ -z $(openstack flavor list | grep $MI_NAME) ]]; then
+        openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1
     fi
 
 
@@ -62,7 +62,7 @@
     # ----------
 
     # Add tcp/22 and icmp to default security group
-    nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
-    nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0
+    openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22
+    openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp
 
 fi
diff --git a/stack.sh b/stack.sh
index 6a5a2a3..0aaa604 100755
--- a/stack.sh
+++ b/stack.sh
@@ -192,7 +192,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (trusty|wily|xenial|7.0|wheezy|sid|testing|jessie|f23|f24|rhel7|kvmibm1) ]]; then
+if [[ ! ${DISTRO} =~ (trusty|xenial|yakkety|7.0|wheezy|sid|testing|jessie|f23|f24|f25|rhel7|kvmibm1) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -572,9 +572,7 @@
 source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
 source $TOP_DIR/lib/dlm
@@ -800,15 +798,19 @@
 if is_service_enabled neutron nova horizon; then
     install_neutronclient
 fi
-if is_service_enabled heat horizon; then
-    install_heatclient
-fi
 
 # Install shared libraries
 if is_service_enabled cinder nova; then
     install_os_brick
 fi
 
+# Setup TLS certs
+if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
+    configure_CA
+    init_CA
+    init_cert
+fi
+
 # Install middleware
 install_keystonemiddleware
 
@@ -866,6 +868,16 @@
     configure_placement
 fi
 
+# The placement-client fake service exists only to signal that placement
+# connectivity needs to be configured. We configure the placement section
+# of nova.conf if placement-api or placement-client is active and n-cpu
+# runs on the same box.
+if is_service_enabled placement placement-client; then
+    if is_service_enabled n-cpu; then
+        configure_placement_nova_compute
+    fi
+fi
+
 if is_service_enabled horizon; then
     # django openstack_auth
     install_django_openstack_auth
@@ -873,22 +885,10 @@
     stack_install_service horizon
 fi
 
-if is_service_enabled heat; then
-    stack_install_service heat
-    install_heat_other
-    cleanup_heat
-    configure_heat
-fi
-
 if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then
-    configure_CA
-    init_CA
-    init_cert
-    # Add name to ``/etc/hosts``.
-    # Don't be naive and add to existing line!
+    fix_system_ca_bundle_path
 fi
 
-
 # Extras Install
 # --------------
 
@@ -993,6 +993,10 @@
     fi
     screen -r $SCREEN_NAME -X hardstatus alwayslastline "$SCREEN_HARDSTATUS"
     screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true
+
+    if is_service_enabled tls-proxy; then
+        follow_tls_proxy
+    fi
 fi
 
 # Clear ``screenrc`` file
@@ -1021,21 +1025,12 @@
 # Keystone
 # --------
 
-if is_service_enabled keystone; then
-    echo_summary "Starting Keystone"
-
-    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
-        init_keystone
-        start_keystone
-        bootstrap_keystone
-    fi
-
-    # Rather than just export these, we write them out to a
-    # intermediate userrc file that can also be used to debug if
-    # something goes wrong between here and running
-    # tools/create_userrc.sh (this script relies on services other
-    # than keystone being available, so we can't call it right now)
-    cat > $TOP_DIR/userrc_early <<EOF
+# Rather than just export these, we write them out to a
+# intermediate userrc file that can also be used to debug if
+# something goes wrong between here and running
+# tools/create_userrc.sh (this script relies on services other
+# than keystone being available, so we can't call it right now)
+cat > $TOP_DIR/userrc_early <<EOF
 # Use this for debugging issues before files in accrc are created
 
 # Set up password auth credentials now that Keystone is bootstrapped
@@ -1050,12 +1045,21 @@
 
 EOF
 
-    if is_service_enabled tls-proxy; then
-        echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
-        start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
-    fi
+if is_service_enabled tls-proxy; then
+    echo "export OS_CACERT=$INT_CA_DIR/ca-chain.pem" >> $TOP_DIR/userrc_early
+    start_tls_proxy http-services '*' 443 $SERVICE_HOST 80
+fi
 
-    source $TOP_DIR/userrc_early
+source $TOP_DIR/userrc_early
+
+if is_service_enabled keystone; then
+    echo_summary "Starting Keystone"
+
+    if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then
+        init_keystone
+        start_keystone
+        bootstrap_keystone
+    fi
 
     create_keystone_accounts
     create_nova_accounts
@@ -1067,10 +1071,6 @@
         create_swift_accounts
     fi
 
-    if is_service_enabled heat; then
-        create_heat_accounts
-    fi
-
 fi
 
 # Write a clouds.yaml file
@@ -1221,11 +1221,6 @@
 
     echo_summary "Uploading images"
 
-    # Option to upload legacy ami-tty, which works with xenserver
-    if [[ -n "$UPLOAD_LEGACY_TTY" ]]; then
-        IMAGE_URLS="${IMAGE_URLS:+${IMAGE_URLS},}https://github.com/downloads/citrix-openstack/warehouse/tty.tgz"
-    fi
-
     for image_url in ${IMAGE_URLS//,/ }; do
         upload_image $image_url
     done
@@ -1268,7 +1263,10 @@
     start_neutron
 fi
 # Once neutron agents are started setup initial network elements
-create_neutron_initial_network
+if is_service_enabled q-svc && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
+    echo_summary "Creating initial neutron network elements"
+    create_neutron_initial_network
+fi
 
 if is_service_enabled nova; then
     echo_summary "Starting Nova"
@@ -1285,18 +1283,6 @@
     create_volume_types
 fi
 
-# Configure and launch Heat engine, api and metadata
-if is_service_enabled heat; then
-    # Initialize heat
-    echo_summary "Configuring Heat"
-    init_heat
-    echo_summary "Starting Heat"
-    start_heat
-    if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then
-        echo_summary "Building Heat pip mirror"
-        build_heat_pip_mirror
-    fi
-fi
 
 if is_service_enabled horizon; then
     echo_summary "Starting Horizon"
@@ -1381,20 +1367,23 @@
 # ----------------------
 
 # Do this late because it requires compute hosts to have started
-if is_service_enabled n-api && [ "$NOVA_CONFIGURE_CELLSV2" == "True" ]; then
-    create_cell
+if is_service_enabled n-api; then
+    if is_service_enabled n-cpu; then
+        create_cell
+    else
+        # Some CI systems, like Hyper-V, build the control plane on
+        # Linux and join non-Linux computes after setup. This allows
+        # them to delay cell setup until their whole environment
+        # is up.
+        echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment."
+    fi
 fi
 
 # Bash completion
 # ===============
 
 # Prepare bash completion for OSC
-#
-# BUG: https://bugs.launchpad.net/python-openstackclient/+bug/1619274
-# the os-cloud param should not be required but if we don't provide it
-# then this command hangs indefinitely if something is wrong with
-# default environment credentials.
-openstack --os-cloud=devstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
+openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
 
 # If cinder is configured, set global_filter for PV devices
 if is_service_enabled cinder; then
@@ -1412,6 +1401,9 @@
 # Phase: test-config
 run_phase stack test-config
 
+# Apply late configuration from ``local.conf`` if it exists
+# Phase: test-config
+merge_config_group $TOP_DIR/local.conf test-config
 
 # Fin
 # ===
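Because create_cell now runs only when n-cpu is enabled on the same host, deployments that attach their compute nodes later (the Hyper-V style CI mentioned in the hunk above) have to finish the CellsV2 wiring themselves before the cloud is usable; a hedged sketch, since the exact invocation varies by Nova release:

    # run once the compute hosts are up; depending on the release you may
    # also need to pass --transport-url explicitly
    nova-manage cell_v2 simple_cell_setup

The new merge_config_group call at the end of stack.sh means a [[test-config|...]] meta-section in local.conf is applied after the test-config phase, which is mostly useful for overriding Tempest options; a minimal example (the option shown is only illustrative):

    [[test-config|$TEMPEST_CONFIG]]
    [compute]
    build_timeout = 300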
diff --git a/stackrc b/stackrc
index c419ef4..7ce6c51 100644
--- a/stackrc
+++ b/stackrc
@@ -44,26 +44,18 @@
 # Specify which services to launch.  These generally correspond to
 # screen tabs. To change the default list, use the ``enable_service`` and
 # ``disable_service`` functions in ``local.conf``.
-# For example, to enable Swift add this to ``local.conf``:
-#  enable_service s-proxy s-object s-container s-account
-# In order to enable Neutron (a single node setup) add the following
+# For example, to enable Swift as part of DevStack add the following
 # settings in ``local.conf``:
 #  [[local|localrc]]
-#  disable_service n-net
-#  enable_service q-svc
-#  enable_service q-agt
-#  enable_service q-dhcp
-#  enable_service q-l3
-#  enable_service q-meta
-#  # Optional, to enable tempest configuration as part of DevStack
-#  enable_service tempest
-
+#  enable_service s-proxy s-object s-container s-account
 # This allows us to pass ``ENABLED_SERVICES``
 if ! isset ENABLED_SERVICES ; then
     # Keystone - nothing works without keystone
     ENABLED_SERVICES=key
     # Nova - services to support libvirt based openstack clouds
     ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth
+    # Placement service needed for Nova
+    ENABLED_SERVICES+=,placement-api,placement-client
     # Glance services needed for Nova
     ENABLED_SERVICES+=,g-api,g-reg
     # Cinder
@@ -111,12 +103,12 @@
 fi
 
 # Control whether Python 3 should be used.
-export USE_PYTHON3=${USE_PYTHON3:-False}
+export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
 
 # When Python 3 is supported by an application, adding the specific
 # version of Python 3 to this variable will install the app using that
 # version of the interpreter instead of 2.7.
-export PYTHON3_VERSION=${PYTHON3_VERSION:-3.4}
+export PYTHON3_VERSION=${PYTHON3_VERSION:-3.5}
 
 # Just to be more explicit on the Python 2 version to use.
 export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7}
@@ -158,7 +150,7 @@
 fi
 
 # Configure Identity API version: 2.0, 3
-IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0}
+IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-3}
 
 # Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack
 # deployment will be deploying the Identity v2 pipelines. If this option is set
@@ -239,10 +231,6 @@
 GLANCE_REPO=${GLANCE_REPO:-${GIT_BASE}/openstack/glance.git}
 GLANCE_BRANCH=${GLANCE_BRANCH:-master}
 
-# heat service
-HEAT_REPO=${HEAT_REPO:-${GIT_BASE}/openstack/heat.git}
-HEAT_BRANCH=${HEAT_BRANCH:-master}
-
 # django powered web control panel for openstack
 HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git}
 HORIZON_BRANCH=${HORIZON_BRANCH:-master}
@@ -301,10 +289,6 @@
 GITREPO["python-glanceclient"]=${GLANCECLIENT_REPO:-${GIT_BASE}/openstack/python-glanceclient.git}
 GITBRANCH["python-glanceclient"]=${GLANCECLIENT_BRANCH:-master}
 
-# python heat client library
-GITREPO["python-heatclient"]=${HEATCLIENT_REPO:-${GIT_BASE}/openstack/python-heatclient.git}
-GITBRANCH["python-heatclient"]=${HEATCLIENT_BRANCH:-master}
-
 # ironic client
 GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git}
 GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master}
@@ -589,6 +573,9 @@
     lxd)
         LXD_GROUP=${LXD_GROUP:-"lxd"}
         ;;
+    docker)
+        DOCKER_GROUP=${DOCKER_GROUP:-"docker"}
+        ;;
     fake)
         NUMBER_FAKE_NOVA_COMPUTE=${NUMBER_FAKE_NOVA_COMPUTE:-1}
         ;;
@@ -765,7 +752,8 @@
 # Note that setting ``FIXED_RANGE`` may be necessary when running DevStack
 # in an OpenStack cloud that uses either of these address ranges internally.
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
-FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24}
+IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22}
+FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
 FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 HOST_IP_IFACE=${HOST_IP_IFACE:-}
 HOST_IP=${HOST_IP:-}
@@ -778,6 +766,9 @@
 
 HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6")
 
+# Whether or not the port_security extension should be enabled for Neutron.
+NEUTRON_PORT_SECURITY=$(trueorfalse True NEUTRON_PORT_SECURITY)
+
 # SERVICE IP version
 # This is the IP version that services should be listening on, as well
 # as using to register their endpoints with keystone.
@@ -830,6 +821,10 @@
 # Use native SSL for servers in ``SSL_ENABLED_SERVICES``
 USE_SSL=$(trueorfalse False USE_SSL)
 
+# The database may not need to be recreated when two Keystone services
+# share the same database, e.g. in multinode Grenade tests.
+RECREATE_KEYSTONE_DB=$(trueorfalse True RECREATE_KEYSTONE_DB)
+
 # ebtables is inherently racy. If you run it from two or more processes
 # simultaneously it will collide, badly, in the kernel and produce
 # failures or corruption of ebtables. The only way around it is for
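The switch to trueorfalse for USE_PYTHON3 and NEUTRON_PORT_SECURITY normalizes whatever a user put in local.conf into a literal True or False. trueorfalse (from DevStack's functions-common) takes the default value and the variable name, not the variable's value; a small sketch:

    USE_PYTHON3=yes
    USE_PYTHON3=$(trueorfalse False USE_PYTHON3)   # -> True
    unset USE_PYTHON3
    USE_PYTHON3=$(trueorfalse False USE_PYTHON3)   # -> False (the default)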
diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh
index fb55023..415fec5 100755
--- a/tests/test_libs_from_pypi.sh
+++ b/tests/test_libs_from_pypi.sh
@@ -32,7 +32,7 @@
 ALL_LIBS="python-novaclient oslo.config pbr oslo.context"
 ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf"
 ALL_LIBS+=" python-glanceclient python-ironicclient"
-ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore"
+ALL_LIBS+=" oslo.messaging oslo.log cliff stevedore"
 ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db"
 ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware"
 ALL_LIBS+=" oslo.serialization django_openstack_auth"
diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh
index 327fb56..92f9c01 100755
--- a/tests/test_meta_config.sh
+++ b/tests/test_meta_config.sh
@@ -125,6 +125,14 @@
 [[test10|does-not-exist-dir/test.conf]]
 foo=bar
 
+[[test11|test-same.conf]]
+[DEFAULT]
+foo=bar
+
+[[test11|test-same.conf]]
+[some]
+random=config
+
 [[test-multi-sections|test-multi-sections.conf]]
 [sec-1]
 cfg_item1 = abcd
@@ -147,6 +155,9 @@
 cfg_item2 = efgh
 cfg_item2 = \${FOO_BAR_BAZ}
 
+[[test11|test-same.conf]]
+[another]
+non = sense
 EOF
 
 echo -n "get_meta_section_files: test0 doesn't exist: "
@@ -385,8 +396,24 @@
 check_result "$VAL" "$EXPECT_VAL"
 set -e
 
+echo -n "merge_config_file test11 same section: "
+rm -f test-same.conf
+merge_config_group test.conf test11
+VAL=$(cat test-same.conf)
+EXPECT_VAL='
+[DEFAULT]
+foo = bar
+
+[some]
+random = config
+
+[another]
+non = sense'
+check_result "$VAL" "$EXPECT_VAL"
+
+
 rm -f test.conf test1c.conf test2a.conf \
     test-space.conf test-equals.conf test-strip.conf \
     test-colon.conf test-env.conf test-multiline.conf \
-    test-multi-sections.conf
+    test-multi-sections.conf test-same.conf
 rm -rf test-etc
diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh
index b6db5d1..f4a4edc 100755
--- a/tools/create_userrc.sh
+++ b/tools/create_userrc.sh
@@ -152,7 +152,7 @@
 fi
 
 if [ -z "$OS_AUTH_URL" ]; then
-    export OS_AUTH_URL=http://localhost:5000/v2.0/
+    export OS_AUTH_URL=http://localhost:5000/v3/
 fi
 
 if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then
@@ -193,7 +193,6 @@
 export OS_AUTH_URL="$OS_AUTH_URL"
 export OS_CACERT="$OS_CACERT"
 export NOVA_CERT="$ACCOUNT_DIR/cacert.pem"
-export OS_AUTH_TYPE=v2password
 EOF
     if [ -n "$ADDPASS" ]; then
         echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile"
diff --git a/tools/make_cert.sh b/tools/make_cert.sh
index 2628b40..e91464f 100755
--- a/tools/make_cert.sh
+++ b/tools/make_cert.sh
@@ -45,6 +45,7 @@
 
 # Make sure the CA is set up
 configure_CA
+fix_system_ca_bundle_path
 init_CA
 
 # Create the server cert
diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh
index dba7502..73fe3f3 100755
--- a/tools/ping_neutron.sh
+++ b/tools/ping_neutron.sh
@@ -54,7 +54,7 @@
 REMAINING_ARGS="${@:2}"
 
 # BUG: with duplicate network names, this fails pretty hard.
-NET_ID=$(neutron net-list | grep "$NET_NAME" | awk '{print $2}')
+NET_ID=$(openstack network show -f value -c id "$NET_NAME")
 PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1)
 
 # This runs a command inside the specific netns
diff --git a/tools/worlddump.py b/tools/worlddump.py
index e1ef544..1ce931e 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -34,6 +34,7 @@
     'neutron-linuxbridge-agent',
     'neutron-metadata-agent',
     'neutron-openvswitch-agent',
+    'cinder-volume',
 )
 
 
diff --git a/unstack.sh b/unstack.sh
index d93b835..b0ebaf7 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -66,9 +66,7 @@
 source $TOP_DIR/lib/placement
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
-source $TOP_DIR/lib/heat
 source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
 source $TOP_DIR/lib/dlm
@@ -97,17 +95,8 @@
 # Phase: unstack
 run_phase unstack
 
-if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then
-    source $TOP_DIR/openrc
-    teardown_neutron_debug
-fi
-
 # Call service stop
 
-if is_service_enabled heat; then
-    stop_heat
-fi
-
 if is_service_enabled nova; then
     stop_nova
 fi
@@ -189,11 +178,13 @@
     fi
 fi
 
-# BUG: maybe it doesn't exist? We should isolate this further down.
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
-# enabled backends. So if Cinder is enabled, we are sure lvm (lvremove,
-# /etc/lvm/lvm.conf, etc.) is here.
-if is_service_enabled cinder; then
+# enabled backends. So if Cinder is enabled and installed successfully, we can
+# be sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is present.
+if is_service_enabled cinder && is_package_installed lvm2; then
+    # Using /bin/true here indicates a BUG - maybe the
+    # DEFAULT_VOLUME_GROUP_NAME doesn't exist?  We should
+    # isolate this further down in lib/cinder cleanup.
     clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true
     clean_lvm_filter
 fi