Merge "Fix DevStack to configure tempest's service_availability"
diff --git a/.gitignore b/.gitignore
index e5e1f6a..8fe56ad 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,7 +28,7 @@
 files/ir-deploy*
 files/ironic-inspector*
 files/etcd*
-local.conf
+/local.conf
 local.sh
 localrc
 proto
diff --git a/.gitreview b/.gitreview
index 570d31a..e1bf63b 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=review.openstack.org
+host=review.opendev.org
 port=29418
-project=openstack-dev/devstack.git
+project=openstack/devstack.git
diff --git a/.zuul.yaml b/.zuul.yaml
index 7ac30d8..cbb9d99 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -19,6 +19,16 @@
           - controller
 
 - nodeset:
+    name: openstack-single-node-xenial
+    nodes:
+      - name: controller
+        label: ubuntu-xenial
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: devstack-single-node-centos-7
     nodes:
       - name: controller
@@ -29,10 +39,10 @@
           - controller
 
 - nodeset:
-    name: devstack-single-node-opensuse-150
+    name: devstack-single-node-opensuse-15
     nodes:
       - name: controller
-        label: opensuse-150
+        label: opensuse-15
     groups:
       - name: tempest
         nodes:
@@ -42,7 +52,7 @@
     name: devstack-single-node-fedora-latest
     nodes:
       - name: controller
-        label: fedora-28
+        label: fedora-29
     groups:
       - name: tempest
         nodes:
@@ -78,6 +88,101 @@
         nodes:
           - compute1
 
+- nodeset:
+    name: openstack-two-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+      - name: compute1
+        label: ubuntu-bionic
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-xenial
+    nodes:
+      - name: controller
+        label: ubuntu-xenial
+      - name: compute1
+        label: ubuntu-xenial
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-three-node-bionic
+    nodes:
+      - name: controller
+        label: ubuntu-bionic
+      - name: compute1
+        label: ubuntu-bionic
+      - name: compute2
+        label: ubuntu-bionic
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+          - compute2
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+          - compute2
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+          - compute2
+
 - job:
     name: devstack-base
     parent: multinode
@@ -93,10 +198,10 @@
       job.group-vars.peers, which is what is used by multi node jobs for subnode
       nodes (everything but the controller).
     required-projects:
-      - git.openstack.org/openstack-dev/devstack
+      - opendev.org/openstack/devstack
     roles:
-      - zuul: git.openstack.org/openstack-infra/devstack-gate
-      - zuul: git.openstack.org/openstack-infra/openstack-zuul-jobs
+      - zuul: opendev.org/openstack/devstack-gate
+      - zuul: opendev.org/openstack/openstack-zuul-jobs
     vars:
       devstack_localrc:
         DATABASE_PASSWORD: secretdatabase
@@ -114,8 +219,8 @@
         VERBOSE_NO_TIMESTAMP: true
         NOVNC_FROM_PACKAGE: true
         ERROR_ON_CLONE: true
-        # Gate jobs can't deal with nested virt. Disable it.
-        LIBVIRT_TYPE: qemu
+        # Gate jobs can't deal with nested virt. Disable it by default.
+        LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}'
       devstack_services:
         # Ignore any default set by devstack. Emit a "disable_all_services".
         base: false
@@ -127,6 +232,8 @@
         '{{ devstack_log_dir }}/dstat-csv.log': logs
         '{{ devstack_log_dir }}/devstacklog.txt': logs
         '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
+        '{{ devstack_log_dir }}/tcpdump.pcap': logs
+        '{{ devstack_log_dir }}/worlddump-latest.txt': logs
         '{{ devstack_full_log}}': logs
         '{{ stage_dir }}/verify_tempest_conf.log': logs
         '{{ stage_dir }}/apache': logs
@@ -134,8 +241,7 @@
         '{{ stage_dir }}/etc': logs
         /var/log/rabbitmq: logs
         /var/log/postgresql: logs
-        /var/log/mysql.err: logs
-        /var/log/mysql.log: logs
+        /var/log/mysql: logs
         /var/log/libvirt: logs
         /etc/sudoers: logs
         /etc/sudoers.d: logs
@@ -148,6 +254,7 @@
         '{{ stage_dir }}/core': logs
         '{{ stage_dir }}/listen53.txt': logs
         '{{ stage_dir }}/deprecations.log': logs
+        '{{ stage_dir }}/audit.log': logs
         /var/log/ceph: logs
         /var/log/openvswitch: logs
         /var/log/glusterfs: logs
@@ -199,9 +306,9 @@
     description: |
       Minimal devstack base job, intended for use by jobs that need
       less than the normal minimum set of required-projects.
-    nodeset: openstack-single-node
+    nodeset: openstack-single-node-bionic
     required-projects:
-      - git.openstack.org/openstack/requirements
+      - opendev.org/openstack/requirements
     vars:
       devstack_localrc:
         # Multinode specific settings
@@ -241,8 +348,8 @@
       This base job can be used for single node and multinode devstack jobs.
 
       With a single node nodeset, this job sets up an "all-in-one" (aio)
-      devstack with the six OpenStack services included in the devstack tree:
-      keystone, glance, cinder, neutron, nova and swift.
+      devstack with the seven OpenStack services included in the devstack tree:
+      keystone, glance, cinder, neutron, nova, placement, and swift.
 
       With a two node nodeset, this job sets up an aio + compute node.
       The controller can be customised using host-vars.controller, the
@@ -258,14 +365,14 @@
 
       The run playbook consists of a single role, so it can be easily rewritten
       and extended.
-    nodeset: openstack-single-node
     required-projects:
-      - git.openstack.org/openstack/cinder
-      - git.openstack.org/openstack/glance
-      - git.openstack.org/openstack/keystone
-      - git.openstack.org/openstack/neutron
-      - git.openstack.org/openstack/nova
-      - git.openstack.org/openstack/swift
+      - opendev.org/openstack/cinder
+      - opendev.org/openstack/glance
+      - opendev.org/openstack/keystone
+      - opendev.org/openstack/neutron
+      - opendev.org/openstack/nova
+      - opendev.org/openstack/placement
+      - opendev.org/openstack/swift
     timeout: 7200
     vars:
       devstack_localrc:
@@ -273,11 +380,8 @@
         SWIFT_REPLICAS: 1
         SWIFT_START_ALL_SERVICES: false
         SWIFT_HASH: 1234123412341234
-        CINDER_PERIODIC_INTERVAL: 10
         DEBUG_LIBVIRT_COREDUMPS: true
         NOVA_VNC_ENABLED: true
-        VNCSERVER_LISTEN: 0.0.0.0
-        VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP
       devstack_local_conf:
         post-config:
           $NEUTRON_CONF:
@@ -301,11 +405,9 @@
         # Nova services
         n-api: true
         n-api-meta: true
-        n-cauth: true
         n-cond: true
         n-cpu: true
         n-novnc: true
-        n-obj: true
         n-sch: true
         placement-api: true
         # Neutron services
@@ -354,7 +456,10 @@
           n-cpu: true
           placement-client: true
           # Neutron services
-          neutron-agent: true
+          # We need to keep using the neutron-legacy based services for
+          # now until all issues with the new lib/neutron code are solved
+          q-agt: true
+          # neutron-agent: true
           # Cinder services
           c-bak: true
           c-vol: true
@@ -372,8 +477,6 @@
           GLANCE_HOSTPORT: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}:9292"
           Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
           NOVA_VNC_ENABLED: true
-          VNCSERVER_LISTEN: 0.0.0.0
-          VNCSERVER_PROXYCLIENT_ADDRESS: $HOST_IP
 
 - job:
     name: devstack-ipv6
@@ -391,7 +494,7 @@
 - job:
     name: devstack-multinode
     parent: devstack
-    nodeset: openstack-two-node
+    nodeset: openstack-two-node-bionic
     description: |
       Simple multinode test to verify multinode functionality on devstack side.
       This is not meant to be used as a parent job.
@@ -401,22 +504,15 @@
 # and these platforms don't have the round-the-clock support to avoid
 # becoming blockers in that situation.
 - job:
-    name: devstack-platform-centos-7
-    parent: tempest-full
-    description: Centos 7 platform test
-    nodeset: devstack-single-node-centos-7
-    voting: false
-
-- job:
-    name: devstack-platform-opensuse-150
-    parent: tempest-full
-    description: openSUSE 15.0 platform test
-    nodeset: devstack-single-node-opensuse-150
+    name: devstack-platform-opensuse-15
+    parent: tempest-full-py3
+    description: openSUSE 15.x platform test
+    nodeset: devstack-single-node-opensuse-15
     voting: false
 
 - job:
     name: devstack-platform-fedora-latest
-    parent: tempest-full
+    parent: tempest-full-py3
     description: Fedora latest platform test
     nodeset: devstack-single-node-fedora-latest
     voting: false
@@ -487,27 +583,25 @@
 - project:
     templates:
       - integrated-gate
-      - integrated-gate-py35
+      - integrated-gate-py3
       - publish-openstack-docs-pti
     check:
       jobs:
         - devstack
-        - devstack-ipv6:
-            voting: false
-        - devstack-platform-centos-7
-        - devstack-platform-opensuse-150
+        - devstack-ipv6
+        - devstack-platform-opensuse-15
         - devstack-platform-fedora-latest
         - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
-        - ironic-tempest-dsvm-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
+        - ironic-tempest-ipa-wholedisk-bios-agent_ipmitool-tinyipa:
             voting: false
         - swift-dsvm-functional:
             voting: false
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - neutron-grenade:
+        - grenade-py3:
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
@@ -524,9 +618,19 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
+        - openstacksdk-functional-devstack:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-ipv6-only:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
     gate:
       jobs:
         - devstack
+        - devstack-ipv6
+        - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
         - neutron-grenade-multinode:
@@ -537,7 +641,15 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - neutron-grenade:
+        - grenade-py3:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - openstacksdk-functional-devstack:
+            irrelevant-files:
+              - ^.*\.rst$
+              - ^doc/.*$
+        - tempest-ipv6-only:
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
@@ -545,11 +657,8 @@
     # being experimental any more, so we can keep this list somewhat
     # pruned.
     #
-    # * nova-cells-v1: maintained by nova for cells v1 (nova-cells service);
-    #    nova gates on this job, it's in experimental for testing cells v1
-    #    changes to devstack w/o gating on it for all devstack changes.
     # * nova-next: maintained by nova for unreleased/undefaulted
-    #    things like cellsv2 and placement-api
+    #    things
     # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test
     #    when neutron-api is served by uwsgi, it's in experimental for testing.
     #    the next cycle we can remove this job if things turn out to be
@@ -559,13 +668,12 @@
     #    stable enough with uwsgi.
     # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test.
     #    Next cycle we can remove this if everything turns out stable enough.
+    # * nova-multi-cell: maintained by nova and currently non-voting in the
+    #    check queue for nova changes but relies on devstack configuration
 
     experimental:
       jobs:
-        - nova-cells-v1:
-            irrelevant-files:
-              - ^.*\.rst$
-              - ^doc/.*$
+        - nova-multi-cell
         - nova-next
         - neutron-fullstack-with-uwsgi
         - neutron-functional-with-uwsgi
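
The templated ``LIBVIRT_TYPE`` default added above means a descendant job can opt
back into KVM simply by overriding the ``devstack_libvirt_type`` variable, and the
new two-node nodesets can be referenced by name. A minimal, hypothetical sketch
(the job name and the ``kvm`` value are illustrative only, not part of this change)::

    - job:
        name: devstack-multinode-kvm-example    # hypothetical name
        parent: devstack-multinode
        nodeset: openstack-two-node-bionic
        vars:
          # Overrides the default("qemu") fallback introduced above.
          devstack_libvirt_type: kvm
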
diff --git a/HACKING.rst b/HACKING.rst
index 3853eed..f0bb269 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -11,7 +11,7 @@
 set up and interact with OpenStack components.
 
-DevStack's official repository is located on git.openstack.org at
-https://git.openstack.org/openstack-dev/devstack.  Besides the master branch that
+DevStack's official repository is located on opendev.org at
+https://opendev.org/openstack/devstack.  Besides the master branch that
 tracks the OpenStack trunk branches a separate branch is maintained for all
 OpenStack releases starting with Diablo (stable/diablo).
 
@@ -26,7 +26,7 @@
 .. _lp: https://launchpad.net/~devstack
 
 The `Gerrit review
-queue <https://review.openstack.org/#/q/project:openstack-dev/devstack,n,z>`__
+queue <https://review.opendev.org/#/q/project:openstack/devstack>`__
 is used for all commits.
 
 The primary script in DevStack is ``stack.sh``, which performs the bulk of the
@@ -145,8 +145,8 @@
 * Global configuration that may be referenced in ``local.conf``, i.e. ``DEST``, ``DATA_DIR``
 * Global service configuration like ``ENABLED_SERVICES``
 * Variables used by multiple services that do not have a clear owner, i.e.
-  ``VOLUME_BACKING_FILE_SIZE`` (nova-compute, nova-volumes and cinder) or
-  ``PUBLIC_NETWORK_NAME`` (nova-network and neutron)
+  ``VOLUME_BACKING_FILE_SIZE`` (nova-compute and cinder) or
+  ``PUBLIC_NETWORK_NAME`` (only neutron but formerly nova-network too)
 * Variables that can not be cleanly declared in a project file due to
   dependency ordering, i.e. the order of sourcing the project files can
   not be changed for other reasons but the earlier file needs to dereference a
@@ -189,7 +189,7 @@
 list below is not complete for what bashate checks, nor is it all checked
 by bashate.  So many lines of code, so little time.
 
-.. _bashate: https://pypi.python.org/pypi/bashate
+.. _bashate: https://pypi.org/project/bashate/
 
 Whitespace Rules
 ----------------
diff --git a/README.rst b/README.rst
index 6885546..f3a585a 100644
--- a/README.rst
+++ b/README.rst
@@ -38,7 +38,7 @@
 `stackrc` for the default set).  Usually just before a release there will be
 milestone-proposed branches that need to be tested::
 
-    GLANCE_REPO=git://git.openstack.org/openstack/glance.git
+    GLANCE_REPO=https://opendev.org/openstack/glance.git
     GLANCE_BRANCH=milestone-proposed
 
 Start A Dev Cloud
diff --git a/clean.sh b/clean.sh
index a29ebd9..d6c6b40 100755
--- a/clean.sh
+++ b/clean.sh
@@ -123,7 +123,7 @@
     sudo rm -rf $LOGDIR
 fi
 
-# Clean out the sytemd user unit files if systemd was used.
+# Clean out the systemd user unit files if systemd was used.
 if [[ "$USE_SYSTEMD" = "True" ]]; then
     sudo find $SYSTEMD_DIR -type f -name '*devstack@*service' -delete
     # Make systemd aware of the deletion.
diff --git a/doc/requirements.txt b/doc/requirements.txt
index f65e9df..fffb83d 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -3,7 +3,7 @@
 Pygments
 docutils
 sphinx>=1.6.2
-openstackdocstheme>=1.11.0
+openstackdocstheme>=1.20.0
 nwdiag
 blockdiag
 sphinxcontrib-blockdiag
diff --git a/doc/source/assets/local.conf b/doc/source/assets/local.conf
new file mode 120000
index 0000000..cfc2a4e
--- /dev/null
+++ b/doc/source/assets/local.conf
@@ -0,0 +1 @@
+../../../samples/local.conf
\ No newline at end of file
diff --git a/doc/source/conf.py b/doc/source/conf.py
index e9708fa..56043ba 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -11,9 +11,6 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
-import os
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
@@ -26,13 +23,16 @@
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = [ 'sphinx.ext.autodoc', 'zuul_sphinx', 'openstackdocstheme', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ]
+extensions = [ 'sphinx.ext.autodoc',
+               'zuul_sphinx',
+               'openstackdocstheme',
+               'sphinxcontrib.blockdiag',
+               'sphinxcontrib.nwdiag' ]
 
 # openstackdocstheme options
 repository_name = 'openstack-dev/devstack'
 bug_project = 'devstack'
 bug_tag = ''
-html_last_updated_fmt = '%Y-%m-%d %H:%M'
 
 todo_include_todos = True
 
@@ -119,11 +119,6 @@
 # pixels large.
 #html_favicon = None
 
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
-html_last_updated_fmt = os.popen(git_cmd).read()
-
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
 #html_use_smartypants = True
@@ -167,21 +162,10 @@
 
 # -- Options for LaTeX output --------------------------------------------------
 
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'DevStack-doc.tex', u'DevStack Docs',
+  ('index', 'doc-devstack.tex', u'DevStack Docs',
    u'OpenStack DevStack Team', 'manual'),
 ]
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 022e6ba..45f4ffe 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -205,7 +205,7 @@
 Historically DevStack obtained all local configuration and
 customizations from a ``localrc`` file.  In Oct 2013 the
 ``local.conf`` configuration method was introduced (in `review 46768
-<https://review.openstack.org/#/c/46768/>`__) to simplify this
+<https://review.opendev.org/#/c/46768/>`__) to simplify this
 process.
 
 Configuration Notes
@@ -224,25 +224,22 @@
 from a different repo for testing, such as a Gerrit branch
 proposal. ``GIT_BASE`` points to the primary repository server.
 
-    ::
+::
 
-        NOVA_REPO=$GIT_BASE/openstack/nova.git
-        NOVA_BRANCH=master
+    NOVA_REPO=$GIT_BASE/openstack/nova.git
+    NOVA_BRANCH=master
 
 To pull a branch directly from Gerrit, get the repo and branch from
-the Gerrit review page:
+the Gerrit review page::
 
-    ::
+    git fetch https://review.opendev.org/openstack/nova \
+        refs/changes/50/5050/1 && git checkout FETCH_HEAD
 
-        git fetch https://review.openstack.org/p/openstack/nova refs/changes/50/5050/1 && git checkout FETCH_HEAD
+The repo is the stanza following ``fetch`` and the branch is the
+stanza following that::
 
-    The repo is the stanza following ``fetch`` and the branch is the
-    stanza following that:
-
-    ::
-
-        NOVA_REPO=https://review.openstack.org/p/openstack/nova
-        NOVA_BRANCH=refs/changes/50/5050/1
+    NOVA_REPO=https://review.opendev.org/openstack/nova
+    NOVA_BRANCH=refs/changes/50/5050/1
 
 
 Installation Directory
@@ -255,13 +252,15 @@
 later variables.  It can be useful to set it even though it is not
 changed from the default value.
 
-    ::
+::
 
-        DEST=/opt/stack
+    DEST=/opt/stack
 
 Logging
 -------
 
+.. _enable_logging:
+
 Enable Logging
 ~~~~~~~~~~~~~~
 
@@ -271,21 +270,21 @@
 timestamp will be appended to the given filename for each run of
 ``stack.sh``.
 
-    ::
+::
 
-        LOGFILE=$DEST/logs/stack.sh.log
+    LOGFILE=$DEST/logs/stack.sh.log
 
 Old log files are cleaned automatically if ``LOGDAYS`` is set to the
 number of days of old log files to keep.
 
-    ::
+::
 
-        LOGDAYS=1
+    LOGDAYS=1
 
 Some coloring is used during the DevStack runs to make it easier to
 see what is going on. This can be disabled with::
 
-        LOG_COLOR=False
+    LOG_COLOR=False
 
 When using the logfile, by default logs are sent to the console and
 the file.  You can set ``VERBOSE`` to ``false`` if you only wish the
@@ -317,12 +316,12 @@
 For example, non-interactive installs probably wish to save output to
 a file, keep service logs and disable color in the stored files.
 
-   ::
+::
 
-       [[local|localrc]]
-       DEST=/opt/stack/
-       LOGFILE=$LOGDIR/stack.sh.log
-       LOG_COLOR=False
+   [[local|localrc]]
+   DEST=/opt/stack/
+   LOGFILE=$LOGDIR/stack.sh.log
+   LOG_COLOR=False
 
 Database Backend
 ----------------
@@ -330,12 +329,10 @@
 Multiple database backends are available. The available databases are defined
 in the lib/databases directory.
 ``mysql`` is the default database, choose a different one by putting the
-following in the ``localrc`` section:
+following in the ``localrc`` section::
 
-   ::
-
-      disable_service mysql
-      enable_service postgresql
+  disable_service mysql
+  enable_service postgresql
 
 ``mysql`` is the default database.
 
@@ -347,11 +344,9 @@
 RabbitMQ is handled via the usual service functions and
 ``ENABLED_SERVICES``.
 
-Example disabling RabbitMQ in ``local.conf``:
+Example disabling RabbitMQ in ``local.conf``::
 
-::
-
-    disable_service rabbit
+  disable_service rabbit
 
 
 Apache Frontend
@@ -370,34 +365,23 @@
 
 Keystone is run under Apache with ``mod_wsgi`` by default.
 
-Example (Keystone)
-
-::
+Example (Keystone)::
 
     KEYSTONE_USE_MOD_WSGI="True"
 
-Example (Nova):
-
-::
+Example (Nova)::
 
     NOVA_USE_MOD_WSGI="True"
 
-Example (Swift):
-
-::
+Example (Swift)::
 
     SWIFT_USE_MOD_WSGI="True"
 
-Example (Heat):
-
-::
+Example (Heat)::
 
     HEAT_USE_MOD_WSGI="True"
 
-
-Example (Cinder):
-
-::
+Example (Cinder)::
 
     CINDER_USE_MOD_WSGI="True"
 
@@ -413,9 +397,9 @@
 git trees by specifying it in ``LIBS_FROM_GIT``.  Multiple libraries
 can be specified as a comma separated list.
 
-   ::
+::
 
-      LIBS_FROM_GIT=python-keystoneclient,oslo.config
+  LIBS_FROM_GIT=python-keystoneclient,oslo.config
 
 Setting the variable to ``ALL`` will activate the download for all
 libraries.
@@ -431,9 +415,9 @@
 of a venv to be used for the project.  The array index is the project
 name.  Multiple projects can use the same venv if desired.
 
-  ::
+::
 
-    PROJECT_VENV["glance"]=${GLANCE_DIR}.venv
+  PROJECT_VENV["glance"]=${GLANCE_DIR}.venv
 
 ``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional
 packages to be installed into each venv.  Often projects will not have
@@ -442,9 +426,9 @@
 configurations.  By default, the enabled databases will have their
 Python bindings added when they are enabled.
 
-  ::
+::
 
-     ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
+  ADDITIONAL_VENV_PACKAGES="python-foo, python-bar"
 
 Use python3
 ------------
@@ -453,9 +437,9 @@
 ``PYTHON2_VERSION``). This can be overridden so devstack will run
 python3 (the exact version set by ``PYTHON3_VERSION``).
 
-  ::
+::
 
-     USE_PYTHON3=True
+  USE_PYTHON3=True
 
 A clean install every time
 --------------------------
@@ -465,9 +449,9 @@
 ``RECLONE`` is set to ``yes``. This avoids having to manually remove
 repos in order to get the current branch from ``$GIT_BASE``.
 
-    ::
+::
 
-        RECLONE=yes
+  RECLONE=yes
 
 Upgrade packages installed by pip
 ---------------------------------
@@ -478,9 +462,9 @@
 required Python packages will be upgraded to the most recent version
 that matches requirements.
 
-    ::
+::
 
-        PIP_UPGRADE=True
+  PIP_UPGRADE=True
 
 Guest Images
 ------------
@@ -494,11 +478,11 @@
 these default images; in that case, you will want to populate
 ``IMAGE_URLS`` with sufficient images to satisfy testing-requirements.
 
-    ::
+::
 
-        DOWNLOAD_DEFAULT_IMAGES=False
-        IMAGE_URLS="http://foo.bar.com/image.qcow,"
-        IMAGE_URLS+="http://foo.bar.com/image2.qcow"
+  DOWNLOAD_DEFAULT_IMAGES=False
+  IMAGE_URLS="http://foo.bar.com/image.qcow,"
+  IMAGE_URLS+="http://foo.bar.com/image2.qcow"
 
 
 Instance Type
@@ -513,13 +497,13 @@
 default flavors instead.
 
 KVM on Power with QEMU 2.4 requires 512 MB to load the firmware -
-`QEMU 2.4 - PowerPC <http://wiki.qemu.org/ChangeLog/2.4>`__ so users
+`QEMU 2.4 - PowerPC <https://wiki.qemu.org/ChangeLog/2.4>`__ so users
 running instances on ppc64/ppc64le can choose one of the default
 created flavors as follows:
 
-    ::
+::
 
-        DEFAULT_INSTANCE_TYPE=m1.tiny
+  DEFAULT_INSTANCE_TYPE=m1.tiny
 
 
 IP Version
@@ -530,19 +514,19 @@
 either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6``
 respectively.
 
-    ::
+::
 
-        IP_VERSION=4+6
+  IP_VERSION=4+6
 
 The following optional variables can be used to alter the default IPv6
 behavior:
 
-    ::
+::
 
-        IPV6_RA_MODE=slaac
-        IPV6_ADDRESS_MODE=slaac
-        IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
-        IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
+  IPV6_RA_MODE=slaac
+  IPV6_ADDRESS_MODE=slaac
+  IPV6_ADDRS_SAFE_TO_USE=fd$IPV6_GLOBAL_ID::/56
+  IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1
 
 *Note*: ``IPV6_ADDRS_SAFE_TO_USE`` and ``IPV6_PRIVATE_NETWORK_GATEWAY``
 can be configured with any valid IPv6 prefix. The default values make
@@ -565,11 +549,9 @@
 
 The default value for this setting is ``4``.  Dual-mode support, for
 example ``4+6`` is not currently supported.  ``HOST_IPV6`` can
-optionally be used to alter the default IPv6 address
+optionally be used to alter the default IPv6 address::
 
-    ::
-
-        HOST_IPV6=${some_local_ipv6_address}
+  HOST_IPV6=${some_local_ipv6_address}
 
 Multi-node setup
 ~~~~~~~~~~~~~~~~
@@ -627,8 +609,8 @@
 Swift S3
 ++++++++
 
-If you are enabling ``swift3`` in ``ENABLED_SERVICES`` DevStack will
-install the swift3 middleware emulation. Swift will be configured to
+If you are enabling ``s3api`` in ``ENABLED_SERVICES`` DevStack will
+install the s3api middleware emulation. Swift will be configured to
 act as an S3 endpoint for Keystone so effectively replacing the
 ``nova-objectstore``.
 
@@ -663,20 +645,6 @@
 If you would like to use Xenserver as the hypervisor, please refer to
 the instructions in ``./tools/xen/README.md``.
 
-Cells
-~~~~~
-
-`Cells <https://wiki.openstack.org/wiki/Blueprint-nova-compute-cells>`__ is
-an alternative scaling option.  To setup a cells environment add the
-following to your ``localrc`` section:
-
-::
-
-    enable_service n-cell
-
-Be aware that there are some features currently missing in cells, one
-notable one being security groups.
-
 Cinder
 ~~~~~~
 
@@ -685,11 +653,11 @@
 ``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set
 with ``VOLUME_BACKING_FILE_SIZE``.
 
-    ::
+::
 
-        VOLUME_GROUP_NAME="stack-volumes"
-        VOLUME_NAME_PREFIX="volume-"
-        VOLUME_BACKING_FILE_SIZE=24G
+  VOLUME_GROUP_NAME="stack-volumes"
+  VOLUME_NAME_PREFIX="volume-"
+  VOLUME_BACKING_FILE_SIZE=24G
 
 
 Keystone
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
new file mode 100644
index 0000000..fd0d9cd
--- /dev/null
+++ b/doc/source/debugging.rst
@@ -0,0 +1,46 @@
+=====================
+System-wide debugging
+=====================
+
+A lot can go wrong during a devstack run, and there are a few inbuilt
+tools to help you.
+
+dstat
+-----
+
+Enable the ``dstat`` service to produce performance logs during the
+devstack run.  These will be logged to the journal and also as a CSV
+file.
+
+memory_tracker
+--------------
+
+The ``memory_tracker`` service periodically monitors RAM usage and
+provides consumption output when available memory is seen to be
+falling (i.e. processes are consuming memory).  It also provides
+output showing locked (unswappable) memory.
+
+tcpdump
+-------
+
+Enable the ``tcpdump`` service to run a background tcpdump.  You must
+set the ``TCPDUMP_ARGS`` variable to something suitable (there is no
+default).  For example, to trace iSCSI communication during a job in
+the OpenStack gate and copy the result into the log output, you might
+use:
+
+.. code-block:: yaml
+
+   job:
+     name: devstack-job
+     parent: devstack
+     vars:
+       devstack_services:
+         tcpdump: true
+       devstack_localrc:
+         TCPDUMP_ARGS: "-i any tcp port 3260"
+       zuul_copy_output:
+         '{{ devstack_log_dir }}/tcpdump.pcap': logs
+
+
+
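
The ``dstat`` and ``memory_tracker`` services described above are toggled the same
way as ``tcpdump``, through ``devstack_services``. A small illustrative sketch of a
job definition (the job name is hypothetical; in the gate these services may already
be enabled by the parent job)::

    - job:
        name: devstack-debug-example    # hypothetical name
        parent: devstack
        vars:
          devstack_services:
            dstat: true            # performance logs (journal + CSV)
            memory_tracker: true   # periodic RAM usage reports
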
diff --git a/doc/source/faq.rst b/doc/source/faq.rst
index efb315c..8214de0 100644
--- a/doc/source/faq.rst
+++ b/doc/source/faq.rst
@@ -80,8 +80,7 @@
 ~~~~~~~~~~~~~~~~~
 
 That isn't a question, but please do! The source for DevStack is at
-`git.openstack.org
-<https://git.openstack.org/cgit/openstack-dev/devstack>`__ and bug
+`opendev.org <https://opendev.org/openstack/devstack>`__ and bug
 reports go to `LaunchPad
 <https://bugs.launchpad.net/devstack/>`__. Contributions follow the
 usual process as described in the `developer guide
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
index df3c7ce..669a70d 100644
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ b/doc/source/guides/devstack-with-lbaas-v2.rst
@@ -1,39 +1,54 @@
-Configure Load-Balancer Version 2
-=================================
+Devstack with Octavia Load Balancing
+====================================
 
-Starting in the OpenStack Liberty release, the
-`neutron LBaaS v2 API <https://developer.openstack.org/api-ref/network/v2/index.html>`_
-is now stable while the LBaaS v1 API has been deprecated.  The LBaaS v2 reference
-driver is based on Octavia.
+Starting with the OpenStack Pike release, Octavia is now a standalone service
+providing load balancing services for OpenStack.
 
+This guide will show you how to create a devstack with `Octavia API`_ enabled.
+
+.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
 
 Phase 1: Create DevStack + 2 nova instances
 --------------------------------------------
 
 First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space,
-make sure it is updated. Install git and any other developer tools you find useful.
+make sure it is updated. Install git and any other developer tools you find
+useful.
 
 Install devstack
 
 ::
 
-    git clone https://git.openstack.org/openstack-dev/devstack
-    cd devstack
+    git clone https://opendev.org/openstack/devstack
+    cd devstack/tools
+    sudo ./create-stack-user.sh
+    cd ../..
+    sudo mv devstack /opt/stack
+    sudo chown -R stack.stack /opt/stack/devstack
 
+This will clone the current devstack code locally, then set up the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location in /opt/stack/devstack.
 
-Edit your ``local.conf`` to look like
+Edit your ``/opt/stack/devstack/local.conf`` to look like
 
 ::
 
     [[local|localrc]]
-    # Load the external LBaaS plugin.
-    enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas
-    enable_plugin octavia https://git.openstack.org/openstack/octavia
+    enable_plugin octavia https://opendev.org/openstack/octavia
+    # If you are enabling horizon, include the octavia dashboard
+    # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git
+    # If you are enabling barbican for TLS offload in Octavia, include it here.
+    # enable_plugin barbican https://opendev.org/openstack/barbican
+
+    # If you have python3 available:
+    # USE_PYTHON3=True
 
     # ===== BEGIN localrc =====
     DATABASE_PASSWORD=password
     ADMIN_PASSWORD=password
     SERVICE_PASSWORD=password
+    SERVICE_TOKEN=password
     RABBIT_PASSWORD=password
     # Enable Logging
     LOGFILE=$DEST/logs/stack.sh.log
@@ -41,27 +56,30 @@
     LOG_COLOR=True
     # Pre-requisite
     ENABLED_SERVICES=rabbit,mysql,key
-    # Horizon
-    ENABLED_SERVICES+=,horizon
+    # Horizon - enable for the OpenStack web GUI
+    # ENABLED_SERVICES+=,horizon
     # Nova
-    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch
+    ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
+    ENABLED_SERVICES+=,placement-api,placement-client
     # Glance
     ENABLED_SERVICES+=,g-api,g-reg
     # Neutron
-    ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta
-    # Enable LBaaS v2
-    ENABLED_SERVICES+=,q-lbaasv2
+    ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
     ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
     # Cinder
     ENABLED_SERVICES+=,c-api,c-vol,c-sch
     # Tempest
     ENABLED_SERVICES+=,tempest
+    # Barbican - Optionally used for TLS offload in Octavia
+    # ENABLED_SERVICES+=,barbican
     # ===== END localrc =====
 
 Run stack.sh and do some sanity checks
 
 ::
 
+    sudo su - stack
+    cd /opt/stack/devstack
     ./stack.sh
     . ./openrc
 
@@ -72,38 +90,59 @@
 ::
 
     #create nova instances on private network
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
-    nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
-    nova list # should show the nova instances just created
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+    openstack server list # should show the nova instances just created
 
     #add secgroup rules to allow ssh etc..
     openstack security group rule create default --protocol icmp
     openstack security group rule create default --protocol tcp --dst-port 22:22
     openstack security group rule create default --protocol tcp --dst-port 80:80
 
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)') and run
+Set up a simple web server on each of these instances. SSH into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run
 
 ::
 
     MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
     while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
 
-Phase 2: Create your load balancers
-------------------------------------
+Phase 2: Create your load balancer
+----------------------------------
+
+Make sure you have the 'openstack loadbalancer' commands:
 
 ::
 
-    neutron lbaas-loadbalancer-create --name lb1 private-subnet
-    neutron lbaas-loadbalancer-show lb1  # Wait for the provisioning_status to be ACTIVE.
-    neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1
-    sleep 10  # Sleep since LBaaS actions can take a few seconds depending on the environment.
-    neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
-    sleep 10
-    neutron lbaas-member-create  --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1
-    sleep 10
-    neutron lbaas-member-create  --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1
+    pip install python-octaviaclient
 
-Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes
-(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be
-reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is
-"curl that-lb-ip", which should alternate between showing the IPs of the two nodes.
+Create your load balancer:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
+    openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: the <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note that, using the API directly, you can do all of the above commands in
+one API call.
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+    openstack loadbalancer show lb1 # Note the vip_address
+    curl http://<vip_address>
+    curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.
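
For CI use, the same Octavia setup can be expressed as a Zuul job rather than a
hand-written ``local.conf``. This sketch assumes the ``devstack_plugins`` job
variable supported by the devstack base job; the job name is hypothetical::

    - job:
        name: devstack-octavia-example    # hypothetical name
        parent: devstack
        required-projects:
          - opendev.org/openstack/octavia
        vars:
          devstack_plugins:
            octavia: https://opendev.org/openstack/octavia
          devstack_services:
            octavia: true
            o-api: true
            o-cw: true
            o-hk: true
            o-hm: true
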
diff --git a/doc/source/guides/devstack-with-ldap.rst b/doc/source/guides/devstack-with-ldap.rst
index ec41141..4c54723 100644
--- a/doc/source/guides/devstack-with-ldap.rst
+++ b/doc/source/guides/devstack-with-ldap.rst
@@ -12,14 +12,14 @@
 LDAP support in keystone is read-only. You can use it to back an entire
 OpenStack deployment to a single LDAP server, or you can use it to back
 separate LDAP servers to specific keystone domains. Users within those domains
-will can authenticate against keystone, assume role assignments, and interact
-with other OpenStack services.
+can authenticate against keystone, assume role assignments, and interact with
+other OpenStack services.
 
 Configuration
 =============
 
 To deploy an OpenLDAP server, make sure ``ldap`` is added to the list of
-``ENABLED_SERVICES``::
+``ENABLED_SERVICES`` in the ``local.conf`` file::
 
     enable_service ldap
 
@@ -35,9 +35,9 @@
 
 At this point, devstack should have everything it needs to deploy OpenLDAP,
 bootstrap it with a minimal set of users, and configure it to back a domain
-in keystone::
+in keystone. You can do this by running the ``stack.sh`` script::
 
-    ./stack.sh
+    $ ./stack.sh
 
 Once ``stack.sh`` completes, you should have a running keystone deployment with
 a basic set of users. It is important to note that not all users will live
@@ -63,7 +63,7 @@
 To list all users in LDAP directly, you can use ``ldapsearch`` with the LDAP
 user bootstrapped by devstack::
 
-    ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+    $ ldapsearch -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
         -H ldap://localhost -b dc=openstack,dc=org
 
 As you can see, devstack creates an OpenStack domain called ``openstack.org``
@@ -93,7 +93,7 @@
 
 Now, we use the ``Manager`` user to create a user for Peter in LDAP::
 
-    ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
         -H ldap://localhost -c -f peter.ldif.in
 
 We should be able to assign Peter roles on projects. After Peter has some level
@@ -125,7 +125,7 @@
 We can use the same basic steps to remove users from LDAP, but instead of using
 LDIFs, we can just pass the ``dn`` of the user we want to delete::
 
-    ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
         -H ldap://localhost cn=peter,ou=Users,dc=openstack,dc=org
 
 Group Management
@@ -153,7 +153,7 @@
 We can create the group using the same ``ldapadd`` command as we did with
 users::
 
-    ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+    $ ldapadd -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
         -H ldap://localhost -c -f guardian-group.ldif.in
 
 If we check the group membership in Horizon, we'll see that only Peter is a
@@ -167,7 +167,7 @@
 
 Just like users, groups can be deleted using the ``dn``::
 
-    ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
+    $ ldapdelete -x -w LDAP_PASSWORD -D cn=Manager,dc=openstack,dc=org \
         -H ldap://localhost cn=guardians,ou=UserGroups,dc=openstack,dc=org
 
 Note that this operation will not remove users within that group. It will only
diff --git a/doc/source/guides/lxc.rst b/doc/source/guides/lxc.rst
index 9549ed2..dcaa416 100644
--- a/doc/source/guides/lxc.rst
+++ b/doc/source/guides/lxc.rst
@@ -105,7 +105,7 @@
 
    ::
 
-       git clone https://git.openstack.org/openstack-dev/devstack
+       git clone https://opendev.org/openstack/devstack
 
 #. Configure
 
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index b4e2891..15f02a0 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -103,7 +103,7 @@
 
 ::
 
-    git clone https://git.openstack.org/openstack-dev/devstack
+    git clone https://opendev.org/openstack/devstack
     cd devstack
 
 Up to this point all of the steps apply to each node in the cluster.
@@ -120,11 +120,8 @@
 
     [[local|localrc]]
     HOST_IP=192.168.42.11
-    FLAT_INTERFACE=eth0
     FIXED_RANGE=10.4.128.0/20
-    FIXED_NETWORK_SIZE=4096
     FLOATING_RANGE=192.168.42.128/25
-    MULTI_HOST=1
     LOGFILE=/opt/stack/logs/stack.sh.log
     ADMIN_PASSWORD=labstack
     DATABASE_PASSWORD=supersecret
@@ -160,11 +157,8 @@
 
     [[local|localrc]]
     HOST_IP=192.168.42.12 # change this per compute node
-    FLAT_INTERFACE=eth0
     FIXED_RANGE=10.4.128.0/20
-    FIXED_NETWORK_SIZE=4096
     FLOATING_RANGE=192.168.42.128/25
-    MULTI_HOST=1
     LOGFILE=/opt/stack/logs/stack.sh.log
     ADMIN_PASSWORD=labstack
     DATABASE_PASSWORD=supersecret
@@ -177,7 +171,7 @@
     GLANCE_HOSTPORT=$SERVICE_HOST:9292
     ENABLED_SERVICES=n-cpu,q-agt,n-api-meta,c-vol,placement-client
     NOVA_VNC_ENABLED=True
-    NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html"
+    NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_lite.html"
     VNCSERVER_LISTEN=$HOST_IP
     VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
 
@@ -240,8 +234,8 @@
     sudo rm -rf /etc/libvirt/qemu/inst*
     sudo virsh list | grep inst | awk '{print $1}' | xargs -n1 virsh destroy
 
-Options pimp your stack
-=======================
+Going further
+=============
 
 Additional Users
 ----------------
@@ -302,10 +296,10 @@
 
 DevStack will automatically use an existing LVM volume group named
 ``stack-volumes`` to store cloud-created volumes. If ``stack-volumes``
-doesn't exist, DevStack will set up a 10Gb loop-mounted file to contain
-it. This obviously limits the number and size of volumes that can be
-created inside OpenStack. The size can be overridden by setting
-``VOLUME_BACKING_FILE_SIZE`` in ``local.conf``.
+doesn't exist, DevStack will set up a loop-mounted file to contain
+it.  If the default size is insufficient for the number and size of volumes
+required, it can be overridden by setting ``VOLUME_BACKING_FILE_SIZE`` in
+``local.conf`` (sizes given in ``truncate`` compatible format, e.g. ``24G``).
 
 ``stack-volumes`` can be pre-created on any physical volume supported by
 Linux's LVM. The name of the volume group can be changed by setting
@@ -369,17 +363,6 @@
 Notes stuff you might need to know
 ==================================
 
-Reset the Bridge
-----------------
-
-How to reset the bridge configuration:
-
-::
-
-    sudo brctl delif br100 eth0.926
-    sudo ip link set dev br100 down
-    sudo brctl delbr br100
-
 Set MySQL Password
 ------------------
 
diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst
index 12c6d69..2c25a1c 100644
--- a/doc/source/guides/neutron.rst
+++ b/doc/source/guides/neutron.rst
@@ -567,7 +567,7 @@
     Q_ML2_PLUGIN_MECHANISM_DRIVERS=macvtap
     Q_USE_PROVIDER_NETWORKING=True
 
-    enable_plugin neutron git://git.openstack.org/openstack/neutron
+    enable_plugin neutron https://opendev.org/openstack/neutron
 
     ## MacVTap agent options
     Q_AGENT=macvtap
@@ -622,7 +622,7 @@
 
     # Services that a compute node runs
     disable_all_services
-    enable_plugin neutron git://git.openstack.org/openstack/neutron
+    enable_plugin neutron https://opendev.org/openstack/neutron
     ENABLED_SERVICES+=n-cpu,q-agt
 
     ## MacVTap agent options
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 0f105d7..5b42797 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -10,7 +10,7 @@
 ================
 
 In Juno, nova implemented a `spec
-<http://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
+<https://specs.openstack.org/openstack/nova-specs/specs/juno/implemented/serial-ports.html>`_
 to allow read/write access to the serial console of an instance via
 `nova-serialproxy
 <https://docs.openstack.org/nova/latest/cli/nova-serialproxy.html>`_.
@@ -63,8 +63,74 @@
 Enabling the service is enough to be functional for a single machine DevStack.
 
 These config options are defined in `nova.conf.serial_console
-<https://github.com/openstack/nova/blob/master/nova/conf/serial_console.py>`_.
+<https://opendev.org/openstack/nova/src/master/nova/conf/serial_console.py>`_.
 
 For more information on OpenStack configuration see the `OpenStack
 Compute Service Configuration Reference
 <https://docs.openstack.org/nova/latest/admin/configuration/index.html>`_
+
+
+Fake virt driver
+================
+
+Nova has a `fake virt driver`_ which can be used for scale testing the control
+plane services or testing "move" operations between fake compute nodes, for
+example cold/live migration, evacuate and unshelve.
+
+The fake virt driver does not communicate with any hypervisor, it just reports
+some fake resource inventory values and keeps track of the state of the
+"guests" created, moved and deleted. It is not feature-complete with the
+compute API but is good enough for most API testing, and is also used within
+the nova functional tests themselves so is fairly robust.
+
+.. _fake virt driver: https://opendev.org/openstack/nova/src/branch/master/nova/virt/fake.py
+
+Configuration
+-------------
+
+Set the following in your devstack ``local.conf``:
+
+.. code-block:: ini
+
+  [[local|localrc]]
+  VIRT_DRIVER=fake
+  NUMBER_FAKE_NOVA_COMPUTE=<number>
+
+The ``NUMBER_FAKE_NOVA_COMPUTE`` variable controls the number of fake
+``nova-compute`` services to run and defaults to 1.
+
+When ``VIRT_DRIVER=fake`` is used, devstack will disable quota checking in
+nova and neutron automatically. However, other services, like cinder, will
+still enforce quota limits by default.
+
+Scaling
+-------
+
+The actual value to use for ``NUMBER_FAKE_NOVA_COMPUTE`` depends on factors
+such as:
+
+* The size of the host (physical or virtualized) on which devstack is running.
+* The number of API workers. By default, devstack will run ``max($nproc/2, 2)``
+  workers per API service. If you are running several fake compute services on
+  a single host, then consider setting ``API_WORKERS=1`` in ``local.conf``.
+
+In addition, while quota will be disabled in neutron, there is no fake ML2
+backend for neutron so creating fake VMs will still result in real ports being
+created. To create servers without networking, you can specify ``--nic=none``
+when creating the server, for example:
+
+.. code-block:: shell
+
+  $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \
+      --image cirros-0.3.5-x86_64-disk --nic none --wait test-server
+
+.. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is
+          required to use ``--nic=none``.
+
+To avoid overhead from other services which you may not need, disable them in
+your ``local.conf``, for example:
+
+.. code-block:: ini
+
+  disable_service horizon
+  disable_service tempest
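
The fake virt driver settings above can also be fed to a Zuul job through
``devstack_localrc``. A minimal sketch (the job name and the compute count are
illustrative only)::

    - job:
        name: devstack-fake-compute-example    # hypothetical name
        parent: devstack
        vars:
          devstack_localrc:
            VIRT_DRIVER: fake
            NUMBER_FAKE_NOVA_COMPUTE: 20
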
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index 515ea9a..a0e97ed 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -45,31 +45,37 @@
 install you can skip this step and just give the user sudo privileges
 below)
 
-::
+.. code-block:: console
 
-    useradd -s /bin/bash -d /opt/stack -m stack
+    $ sudo useradd -s /bin/bash -d /opt/stack -m stack
 
 Since this user will be making many changes to your system, it will need
 to have sudo privileges:
 
-::
+.. code-block:: console
 
-    apt-get install sudo -y || yum install -y sudo
-    echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+    $ apt-get install sudo -y || yum install -y sudo
+    $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+
+.. note:: On some systems you may need to use ``sudo visudo``.
 
 From here on you should use the user you created. **Logout** and
-**login** as that user.
+**login** as that user:
+
+.. code-block:: console
+
+    $ sudo su stack && cd ~
 
 Download DevStack
 -----------------
 
 We'll grab the latest version of DevStack via https:
 
-::
+.. code-block:: console
 
-    sudo apt-get install git -y || sudo yum install -y git
-    git clone https://git.openstack.org/openstack-dev/devstack
-    cd devstack
+    $ sudo apt-get install git -y || sudo yum install -y git
+    $ git clone https://opendev.org/openstack/devstack
+    $ cd devstack
 
 Run DevStack
 ------------
@@ -81,11 +87,8 @@
 -  Set ``FLOATING_RANGE`` to a range not used on the local network, i.e.
    192.168.1.224/27. This configures IP addresses ending in 225-254 to
    be used as floating IPs.
--  Set ``FIXED_RANGE`` and ``FIXED_NETWORK_SIZE`` to configure the
-   internal address space used by the instances.
--  Set ``FLAT_INTERFACE`` to the Ethernet interface that connects the
-   host to your local network. This is the interface that should be
-   configured with the static IP address mentioned above.
+-  Set ``FIXED_RANGE`` to configure the internal address space used by the
+   instances.
 -  Set the administrative password. This password is used for the
    **admin** and **demo** accounts set up as OpenStack users.
 -  Set the MySQL administrative password. The default here is a random
@@ -97,23 +100,24 @@
 
 ``local.conf`` should look something like this:
 
-::
+.. code-block:: ini
 
     [[local|localrc]]
     FLOATING_RANGE=192.168.1.224/27
     FIXED_RANGE=10.11.12.0/24
-    FIXED_NETWORK_SIZE=256
-    FLAT_INTERFACE=eth0
     ADMIN_PASSWORD=supersecret
     DATABASE_PASSWORD=iheartdatabases
     RABBIT_PASSWORD=flopsymopsy
     SERVICE_PASSWORD=iheartksl
 
+.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
+    under the *samples* directory in the devstack repository.
+
 Run DevStack:
 
-::
+.. code-block:: console
 
-    ./stack.sh
+    $ ./stack.sh
 
 A seemingly endless stream of activity ensues. When complete you will
 see a summary of ``stack.sh``'s work, including the relevant URLs,
diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst
index 45b8f2d..7dac18b 100644
--- a/doc/source/guides/single-vm.rst
+++ b/doc/source/guides/single-vm.rst
@@ -60,7 +60,7 @@
             DEBIAN_FRONTEND=noninteractive sudo apt-get install -qqy git || sudo yum install -qy git
             sudo chown stack:stack /home/stack
             cd /home/stack
-            git clone https://git.openstack.org/openstack-dev/devstack
+            git clone https://opendev.org/openstack/devstack
             cd devstack
             echo '[[local|localrc]]' > local.conf
             echo ADMIN_PASSWORD=password >> local.conf
@@ -78,7 +78,7 @@
 to create a non-root user and run the ``start.sh`` script as that user.
 
 If you are using cloud-init and you have not
-`enabled custom logging <../configuration.html#enable-logging>`_ of the stack
+:ref:`enabled custom logging <enable_logging>` of the stack
 output, then the stack output can be found in
 ``/var/log/cloud-init-output.log`` by default.
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index fcf1e82..6694022 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,9 +11,8 @@
    and how to go beyond this setup. Both should be a set of quick
    links to other documents to let people explore from there.
 
-==========
- DevStack
-==========
+DevStack
+========
 
 .. image:: assets/images/logo-blue.png
 
@@ -23,8 +22,7 @@
 environment and as the basis for much of the OpenStack project's
 functional testing.
 
-The source is available at
-`<https://git.openstack.org/cgit/openstack-dev/devstack>`__.
+The source is available at `<https://opendev.org/openstack/devstack>`__.
 
 .. warning::
 
@@ -33,36 +31,37 @@
    are dedicated to this purpose.
 
 Quick Start
-===========
++++++++++++
 
 Install Linux
 -------------
 
-Start with a clean and minimal install of a Linux system. Devstack
+Start with a clean and minimal install of a Linux system. DevStack
 attempts to support the two latest LTS releases of Ubuntu, the
 latest/current Fedora version, CentOS/RHEL 7, as well as Debian and
 OpenSUSE.
 
-If you do not have a preference, Ubuntu 16.04 is the most tested, and
-will probably go the smoothest.
+If you do not have a preference, Ubuntu 18.04 (Bionic Beaver) is the
+most tested, and will probably go the smoothest.
 
-Add Stack User
---------------
+Add Stack User (optional)
+-------------------------
 
-Devstack should be run as a non-root user with sudo enabled
+DevStack should be run as a non-root user with sudo enabled
 (standard logins to cloud images such as "ubuntu" or "cloud-user"
 are usually fine).
 
-You can quickly create a separate `stack` user to run DevStack with
+If you are not using a cloud image, you can create a separate `stack` user
+to run DevStack with
 
-::
+.. code-block:: console
 
    $ sudo useradd -s /bin/bash -d /opt/stack -m stack
 
 Since this user will be making many changes to your system, it should
 have sudo privileges:
 
-::
+.. code-block:: console
 
     $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
     $ sudo su - stack
@@ -70,20 +69,21 @@
 Download DevStack
 -----------------
 
-::
+.. code-block:: console
 
-   $ git clone https://git.openstack.org/openstack-dev/devstack
+   $ git clone https://opendev.org/openstack/devstack
    $ cd devstack
 
 The ``devstack`` repo contains a script that installs OpenStack and
-templates for configuration files
+templates for configuration files.
 
 Create a local.conf
 -------------------
 
-Create a ``local.conf`` file with 4 passwords preset at the root of the
+Create a ``local.conf`` file with four passwords preset at the root of the
 devstack git repo.
-::
+
+.. code-block:: ini
 
    [[local|localrc]]
    ADMIN_PASSWORD=secret
@@ -93,12 +93,15 @@
 
 This is the minimum required config to get started with DevStack.
 
+.. note:: There is a sample :download:`local.conf </assets/local.conf>` file
+    under the *samples* directory in the devstack repository.
+
 Start the install
 -----------------
 
-::
+.. code-block:: console
 
-   ./stack.sh
+   $ ./stack.sh
 
 This will take 15 to 20 minutes, largely depending on the speed of
 your internet connection. Many git trees and packages will be
@@ -110,8 +113,8 @@
 You now have a working DevStack! Congrats!
 
 Your devstack will have installed ``keystone``, ``glance``, ``nova``,
-``cinder``, ``neutron``, and ``horizon``. Floating IPs will be
-available, guests have access to the external world.
+``placement``, ``cinder``, ``neutron``, and ``horizon``. Floating IPs
+will be available, and guests will have access to the external world.
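+
+As a quick sanity check (a minimal sketch, assuming the ``openrc``
+helper in the root of the devstack repository and the ``openstack``
+command-line client that DevStack installs), you can list the services
+that were registered:
+
+.. code-block:: console
+
+   $ source openrc admin admin
+   $ openstack service list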
 
 You can access horizon to experience the web interface to
 OpenStack, and manage VMs, networks, volumes, and images from
@@ -149,7 +152,7 @@
 <hacking>`.
 
 Contents
---------
+++++++++
 
 .. toctree::
    :glob:
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 2479cd0..a609333 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -64,7 +64,8 @@
 
 The default services configured by DevStack are Identity (keystone),
 Object Storage (swift), Image Service (glance), Block Storage
-(cinder), Compute (nova), Networking (neutron), Dashboard (horizon)
+(cinder), Compute (nova), Placement (placement),
+Networking (neutron), and Dashboard (horizon).
 
 Additional services not included directly in DevStack can be tied in to
 ``stack.sh`` using the :doc:`plugin mechanism <plugins>` to call
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index b02061e..5cbe4ed 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -21,179 +21,188 @@
 official OpenStack projects.
 
 
-====================================== ===
-Plugin Name                            URL
-====================================== ===
-almanach                               `git://git.openstack.org/openstack/almanach <https://git.openstack.org/cgit/openstack/almanach>`__
-aodh                                   `git://git.openstack.org/openstack/aodh <https://git.openstack.org/cgit/openstack/aodh>`__
-apmec                                  `git://git.openstack.org/openstack/apmec <https://git.openstack.org/cgit/openstack/apmec>`__
-barbican                               `git://git.openstack.org/openstack/barbican <https://git.openstack.org/cgit/openstack/barbican>`__
-bilean                                 `git://git.openstack.org/openstack/bilean <https://git.openstack.org/cgit/openstack/bilean>`__
-blazar                                 `git://git.openstack.org/openstack/blazar <https://git.openstack.org/cgit/openstack/blazar>`__
-broadview-collector                    `git://git.openstack.org/openstack/broadview-collector <https://git.openstack.org/cgit/openstack/broadview-collector>`__
-castellan-ui                           `git://git.openstack.org/openstack/castellan-ui <https://git.openstack.org/cgit/openstack/castellan-ui>`__
-ceilometer                             `git://git.openstack.org/openstack/ceilometer <https://git.openstack.org/cgit/openstack/ceilometer>`__
-ceilometer-powervm                     `git://git.openstack.org/openstack/ceilometer-powervm <https://git.openstack.org/cgit/openstack/ceilometer-powervm>`__
-cloudkitty                             `git://git.openstack.org/openstack/cloudkitty <https://git.openstack.org/cgit/openstack/cloudkitty>`__
-collectd-openstack-plugins             `git://git.openstack.org/openstack/collectd-openstack-plugins <https://git.openstack.org/cgit/openstack/collectd-openstack-plugins>`__
-congress                               `git://git.openstack.org/openstack/congress <https://git.openstack.org/cgit/openstack/congress>`__
-cyborg                                 `git://git.openstack.org/openstack/cyborg <https://git.openstack.org/cgit/openstack/cyborg>`__
-designate                              `git://git.openstack.org/openstack/designate <https://git.openstack.org/cgit/openstack/designate>`__
-devstack-plugin-additional-pkg-repos   `git://git.openstack.org/openstack/devstack-plugin-additional-pkg-repos <https://git.openstack.org/cgit/openstack/devstack-plugin-additional-pkg-repos>`__
-devstack-plugin-amqp1                  `git://git.openstack.org/openstack/devstack-plugin-amqp1 <https://git.openstack.org/cgit/openstack/devstack-plugin-amqp1>`__
-devstack-plugin-bdd                    `git://git.openstack.org/openstack/devstack-plugin-bdd <https://git.openstack.org/cgit/openstack/devstack-plugin-bdd>`__
-devstack-plugin-ceph                   `git://git.openstack.org/openstack/devstack-plugin-ceph <https://git.openstack.org/cgit/openstack/devstack-plugin-ceph>`__
-devstack-plugin-container              `git://git.openstack.org/openstack/devstack-plugin-container <https://git.openstack.org/cgit/openstack/devstack-plugin-container>`__
-devstack-plugin-glusterfs              `git://git.openstack.org/openstack/devstack-plugin-glusterfs <https://git.openstack.org/cgit/openstack/devstack-plugin-glusterfs>`__
-devstack-plugin-hdfs                   `git://git.openstack.org/openstack/devstack-plugin-hdfs <https://git.openstack.org/cgit/openstack/devstack-plugin-hdfs>`__
-devstack-plugin-kafka                  `git://git.openstack.org/openstack/devstack-plugin-kafka <https://git.openstack.org/cgit/openstack/devstack-plugin-kafka>`__
-devstack-plugin-libvirt-qemu           `git://git.openstack.org/openstack/devstack-plugin-libvirt-qemu <https://git.openstack.org/cgit/openstack/devstack-plugin-libvirt-qemu>`__
-devstack-plugin-mariadb                `git://git.openstack.org/openstack/devstack-plugin-mariadb <https://git.openstack.org/cgit/openstack/devstack-plugin-mariadb>`__
-devstack-plugin-nfs                    `git://git.openstack.org/openstack/devstack-plugin-nfs <https://git.openstack.org/cgit/openstack/devstack-plugin-nfs>`__
-devstack-plugin-pika                   `git://git.openstack.org/openstack/devstack-plugin-pika <https://git.openstack.org/cgit/openstack/devstack-plugin-pika>`__
-devstack-plugin-sheepdog               `git://git.openstack.org/openstack/devstack-plugin-sheepdog <https://git.openstack.org/cgit/openstack/devstack-plugin-sheepdog>`__
-devstack-plugin-vmax                   `git://git.openstack.org/openstack/devstack-plugin-vmax <https://git.openstack.org/cgit/openstack/devstack-plugin-vmax>`__
-devstack-plugin-zmq                    `git://git.openstack.org/openstack/devstack-plugin-zmq <https://git.openstack.org/cgit/openstack/devstack-plugin-zmq>`__
-dragonflow                             `git://git.openstack.org/openstack/dragonflow <https://git.openstack.org/cgit/openstack/dragonflow>`__
-drbd-devstack                          `git://git.openstack.org/openstack/drbd-devstack <https://git.openstack.org/cgit/openstack/drbd-devstack>`__
-ec2-api                                `git://git.openstack.org/openstack/ec2-api <https://git.openstack.org/cgit/openstack/ec2-api>`__
-freezer                                `git://git.openstack.org/openstack/freezer <https://git.openstack.org/cgit/openstack/freezer>`__
-freezer-api                            `git://git.openstack.org/openstack/freezer-api <https://git.openstack.org/cgit/openstack/freezer-api>`__
-freezer-tempest-plugin                 `git://git.openstack.org/openstack/freezer-tempest-plugin <https://git.openstack.org/cgit/openstack/freezer-tempest-plugin>`__
-freezer-web-ui                         `git://git.openstack.org/openstack/freezer-web-ui <https://git.openstack.org/cgit/openstack/freezer-web-ui>`__
-gce-api                                `git://git.openstack.org/openstack/gce-api <https://git.openstack.org/cgit/openstack/gce-api>`__
-glare                                  `git://git.openstack.org/openstack/glare <https://git.openstack.org/cgit/openstack/glare>`__
-group-based-policy                     `git://git.openstack.org/openstack/group-based-policy <https://git.openstack.org/cgit/openstack/group-based-policy>`__
-gyan                                   `git://git.openstack.org/openstack/gyan <https://git.openstack.org/cgit/openstack/gyan>`__
-heat                                   `git://git.openstack.org/openstack/heat <https://git.openstack.org/cgit/openstack/heat>`__
-heat-dashboard                         `git://git.openstack.org/openstack/heat-dashboard <https://git.openstack.org/cgit/openstack/heat-dashboard>`__
-horizon-mellanox                       `git://git.openstack.org/openstack/horizon-mellanox <https://git.openstack.org/cgit/openstack/horizon-mellanox>`__
-ironic                                 `git://git.openstack.org/openstack/ironic <https://git.openstack.org/cgit/openstack/ironic>`__
-ironic-inspector                       `git://git.openstack.org/openstack/ironic-inspector <https://git.openstack.org/cgit/openstack/ironic-inspector>`__
-ironic-staging-drivers                 `git://git.openstack.org/openstack/ironic-staging-drivers <https://git.openstack.org/cgit/openstack/ironic-staging-drivers>`__
-ironic-ui                              `git://git.openstack.org/openstack/ironic-ui <https://git.openstack.org/cgit/openstack/ironic-ui>`__
-karbor                                 `git://git.openstack.org/openstack/karbor <https://git.openstack.org/cgit/openstack/karbor>`__
-karbor-dashboard                       `git://git.openstack.org/openstack/karbor-dashboard <https://git.openstack.org/cgit/openstack/karbor-dashboard>`__
-keystone                               `git://git.openstack.org/openstack/keystone <https://git.openstack.org/cgit/openstack/keystone>`__
-kingbird                               `git://git.openstack.org/openstack/kingbird <https://git.openstack.org/cgit/openstack/kingbird>`__
-kuryr-kubernetes                       `git://git.openstack.org/openstack/kuryr-kubernetes <https://git.openstack.org/cgit/openstack/kuryr-kubernetes>`__
-kuryr-libnetwork                       `git://git.openstack.org/openstack/kuryr-libnetwork <https://git.openstack.org/cgit/openstack/kuryr-libnetwork>`__
-kuryr-tempest-plugin                   `git://git.openstack.org/openstack/kuryr-tempest-plugin <https://git.openstack.org/cgit/openstack/kuryr-tempest-plugin>`__
-magnum                                 `git://git.openstack.org/openstack/magnum <https://git.openstack.org/cgit/openstack/magnum>`__
-magnum-ui                              `git://git.openstack.org/openstack/magnum-ui <https://git.openstack.org/cgit/openstack/magnum-ui>`__
-manila                                 `git://git.openstack.org/openstack/manila <https://git.openstack.org/cgit/openstack/manila>`__
-manila-ui                              `git://git.openstack.org/openstack/manila-ui <https://git.openstack.org/cgit/openstack/manila-ui>`__
-masakari                               `git://git.openstack.org/openstack/masakari <https://git.openstack.org/cgit/openstack/masakari>`__
-meteos                                 `git://git.openstack.org/openstack/meteos <https://git.openstack.org/cgit/openstack/meteos>`__
-meteos-ui                              `git://git.openstack.org/openstack/meteos-ui <https://git.openstack.org/cgit/openstack/meteos-ui>`__
-mistral                                `git://git.openstack.org/openstack/mistral <https://git.openstack.org/cgit/openstack/mistral>`__
-mixmatch                               `git://git.openstack.org/openstack/mixmatch <https://git.openstack.org/cgit/openstack/mixmatch>`__
-mogan                                  `git://git.openstack.org/openstack/mogan <https://git.openstack.org/cgit/openstack/mogan>`__
-mogan-ui                               `git://git.openstack.org/openstack/mogan-ui <https://git.openstack.org/cgit/openstack/mogan-ui>`__
-monasca-analytics                      `git://git.openstack.org/openstack/monasca-analytics <https://git.openstack.org/cgit/openstack/monasca-analytics>`__
-monasca-api                            `git://git.openstack.org/openstack/monasca-api <https://git.openstack.org/cgit/openstack/monasca-api>`__
-monasca-ceilometer                     `git://git.openstack.org/openstack/monasca-ceilometer <https://git.openstack.org/cgit/openstack/monasca-ceilometer>`__
-monasca-events-api                     `git://git.openstack.org/openstack/monasca-events-api <https://git.openstack.org/cgit/openstack/monasca-events-api>`__
-monasca-log-api                        `git://git.openstack.org/openstack/monasca-log-api <https://git.openstack.org/cgit/openstack/monasca-log-api>`__
-monasca-tempest-plugin                 `git://git.openstack.org/openstack/monasca-tempest-plugin <https://git.openstack.org/cgit/openstack/monasca-tempest-plugin>`__
-monasca-transform                      `git://git.openstack.org/openstack/monasca-transform <https://git.openstack.org/cgit/openstack/monasca-transform>`__
-murano                                 `git://git.openstack.org/openstack/murano <https://git.openstack.org/cgit/openstack/murano>`__
-networking-6wind                       `git://git.openstack.org/openstack/networking-6wind <https://git.openstack.org/cgit/openstack/networking-6wind>`__
-networking-ansible                     `git://git.openstack.org/openstack/networking-ansible <https://git.openstack.org/cgit/openstack/networking-ansible>`__
-networking-arista                      `git://git.openstack.org/openstack/networking-arista <https://git.openstack.org/cgit/openstack/networking-arista>`__
-networking-bagpipe                     `git://git.openstack.org/openstack/networking-bagpipe <https://git.openstack.org/cgit/openstack/networking-bagpipe>`__
-networking-baremetal                   `git://git.openstack.org/openstack/networking-baremetal <https://git.openstack.org/cgit/openstack/networking-baremetal>`__
-networking-bgpvpn                      `git://git.openstack.org/openstack/networking-bgpvpn <https://git.openstack.org/cgit/openstack/networking-bgpvpn>`__
-networking-brocade                     `git://git.openstack.org/openstack/networking-brocade <https://git.openstack.org/cgit/openstack/networking-brocade>`__
-networking-calico                      `git://git.openstack.org/openstack/networking-calico <https://git.openstack.org/cgit/openstack/networking-calico>`__
-networking-cisco                       `git://git.openstack.org/openstack/networking-cisco <https://git.openstack.org/cgit/openstack/networking-cisco>`__
-networking-cumulus                     `git://git.openstack.org/openstack/networking-cumulus <https://git.openstack.org/cgit/openstack/networking-cumulus>`__
-networking-dpm                         `git://git.openstack.org/openstack/networking-dpm <https://git.openstack.org/cgit/openstack/networking-dpm>`__
-networking-fortinet                    `git://git.openstack.org/openstack/networking-fortinet <https://git.openstack.org/cgit/openstack/networking-fortinet>`__
-networking-generic-switch              `git://git.openstack.org/openstack/networking-generic-switch <https://git.openstack.org/cgit/openstack/networking-generic-switch>`__
-networking-hpe                         `git://git.openstack.org/openstack/networking-hpe <https://git.openstack.org/cgit/openstack/networking-hpe>`__
-networking-huawei                      `git://git.openstack.org/openstack/networking-huawei <https://git.openstack.org/cgit/openstack/networking-huawei>`__
-networking-hyperv                      `git://git.openstack.org/openstack/networking-hyperv <https://git.openstack.org/cgit/openstack/networking-hyperv>`__
-networking-infoblox                    `git://git.openstack.org/openstack/networking-infoblox <https://git.openstack.org/cgit/openstack/networking-infoblox>`__
-networking-l2gw                        `git://git.openstack.org/openstack/networking-l2gw <https://git.openstack.org/cgit/openstack/networking-l2gw>`__
-networking-lagopus                     `git://git.openstack.org/openstack/networking-lagopus <https://git.openstack.org/cgit/openstack/networking-lagopus>`__
-networking-midonet                     `git://git.openstack.org/openstack/networking-midonet <https://git.openstack.org/cgit/openstack/networking-midonet>`__
-networking-mlnx                        `git://git.openstack.org/openstack/networking-mlnx <https://git.openstack.org/cgit/openstack/networking-mlnx>`__
-networking-nec                         `git://git.openstack.org/openstack/networking-nec <https://git.openstack.org/cgit/openstack/networking-nec>`__
-networking-odl                         `git://git.openstack.org/openstack/networking-odl <https://git.openstack.org/cgit/openstack/networking-odl>`__
-networking-onos                        `git://git.openstack.org/openstack/networking-onos <https://git.openstack.org/cgit/openstack/networking-onos>`__
-networking-opencontrail                `git://git.openstack.org/openstack/networking-opencontrail <https://git.openstack.org/cgit/openstack/networking-opencontrail>`__
-networking-ovn                         `git://git.openstack.org/openstack/networking-ovn <https://git.openstack.org/cgit/openstack/networking-ovn>`__
-networking-ovs-dpdk                    `git://git.openstack.org/openstack/networking-ovs-dpdk <https://git.openstack.org/cgit/openstack/networking-ovs-dpdk>`__
-networking-plumgrid                    `git://git.openstack.org/openstack/networking-plumgrid <https://git.openstack.org/cgit/openstack/networking-plumgrid>`__
-networking-powervm                     `git://git.openstack.org/openstack/networking-powervm <https://git.openstack.org/cgit/openstack/networking-powervm>`__
-networking-sfc                         `git://git.openstack.org/openstack/networking-sfc <https://git.openstack.org/cgit/openstack/networking-sfc>`__
-networking-spp                         `git://git.openstack.org/openstack/networking-spp <https://git.openstack.org/cgit/openstack/networking-spp>`__
-networking-vpp                         `git://git.openstack.org/openstack/networking-vpp <https://git.openstack.org/cgit/openstack/networking-vpp>`__
-networking-vsphere                     `git://git.openstack.org/openstack/networking-vsphere <https://git.openstack.org/cgit/openstack/networking-vsphere>`__
-neutron                                `git://git.openstack.org/openstack/neutron <https://git.openstack.org/cgit/openstack/neutron>`__
-neutron-classifier                     `git://git.openstack.org/openstack/neutron-classifier <https://git.openstack.org/cgit/openstack/neutron-classifier>`__
-neutron-dynamic-routing                `git://git.openstack.org/openstack/neutron-dynamic-routing <https://git.openstack.org/cgit/openstack/neutron-dynamic-routing>`__
-neutron-fwaas                          `git://git.openstack.org/openstack/neutron-fwaas <https://git.openstack.org/cgit/openstack/neutron-fwaas>`__
-neutron-fwaas-dashboard                `git://git.openstack.org/openstack/neutron-fwaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-fwaas-dashboard>`__
-neutron-lbaas                          `git://git.openstack.org/openstack/neutron-lbaas <https://git.openstack.org/cgit/openstack/neutron-lbaas>`__
-neutron-lbaas-dashboard                `git://git.openstack.org/openstack/neutron-lbaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-lbaas-dashboard>`__
-neutron-tempest-plugin                 `git://git.openstack.org/openstack/neutron-tempest-plugin <https://git.openstack.org/cgit/openstack/neutron-tempest-plugin>`__
-neutron-vpnaas                         `git://git.openstack.org/openstack/neutron-vpnaas <https://git.openstack.org/cgit/openstack/neutron-vpnaas>`__
-neutron-vpnaas-dashboard               `git://git.openstack.org/openstack/neutron-vpnaas-dashboard <https://git.openstack.org/cgit/openstack/neutron-vpnaas-dashboard>`__
-nova-dpm                               `git://git.openstack.org/openstack/nova-dpm <https://git.openstack.org/cgit/openstack/nova-dpm>`__
-nova-lxd                               `git://git.openstack.org/openstack/nova-lxd <https://git.openstack.org/cgit/openstack/nova-lxd>`__
-nova-mksproxy                          `git://git.openstack.org/openstack/nova-mksproxy <https://git.openstack.org/cgit/openstack/nova-mksproxy>`__
-nova-powervm                           `git://git.openstack.org/openstack/nova-powervm <https://git.openstack.org/cgit/openstack/nova-powervm>`__
-oaktree                                `git://git.openstack.org/openstack/oaktree <https://git.openstack.org/cgit/openstack/oaktree>`__
-octavia                                `git://git.openstack.org/openstack/octavia <https://git.openstack.org/cgit/openstack/octavia>`__
-octavia-dashboard                      `git://git.openstack.org/openstack/octavia-dashboard <https://git.openstack.org/cgit/openstack/octavia-dashboard>`__
-omni                                   `git://git.openstack.org/openstack/omni <https://git.openstack.org/cgit/openstack/omni>`__
-openstacksdk                           `git://git.openstack.org/openstack/openstacksdk <https://git.openstack.org/cgit/openstack/openstacksdk>`__
-os-xenapi                              `git://git.openstack.org/openstack/os-xenapi <https://git.openstack.org/cgit/openstack/os-xenapi>`__
-osprofiler                             `git://git.openstack.org/openstack/osprofiler <https://git.openstack.org/cgit/openstack/osprofiler>`__
-oswin-tempest-plugin                   `git://git.openstack.org/openstack/oswin-tempest-plugin <https://git.openstack.org/cgit/openstack/oswin-tempest-plugin>`__
-panko                                  `git://git.openstack.org/openstack/panko <https://git.openstack.org/cgit/openstack/panko>`__
-patrole                                `git://git.openstack.org/openstack/patrole <https://git.openstack.org/cgit/openstack/patrole>`__
-picasso                                `git://git.openstack.org/openstack/picasso <https://git.openstack.org/cgit/openstack/picasso>`__
-qinling                                `git://git.openstack.org/openstack/qinling <https://git.openstack.org/cgit/openstack/qinling>`__
-qinling-dashboard                      `git://git.openstack.org/openstack/qinling-dashboard <https://git.openstack.org/cgit/openstack/qinling-dashboard>`__
-rally                                  `git://git.openstack.org/openstack/rally <https://git.openstack.org/cgit/openstack/rally>`__
-rally-openstack                        `git://git.openstack.org/openstack/rally-openstack <https://git.openstack.org/cgit/openstack/rally-openstack>`__
-sahara                                 `git://git.openstack.org/openstack/sahara <https://git.openstack.org/cgit/openstack/sahara>`__
-sahara-dashboard                       `git://git.openstack.org/openstack/sahara-dashboard <https://git.openstack.org/cgit/openstack/sahara-dashboard>`__
-scalpels                               `git://git.openstack.org/openstack/scalpels <https://git.openstack.org/cgit/openstack/scalpels>`__
-searchlight                            `git://git.openstack.org/openstack/searchlight <https://git.openstack.org/cgit/openstack/searchlight>`__
-searchlight-ui                         `git://git.openstack.org/openstack/searchlight-ui <https://git.openstack.org/cgit/openstack/searchlight-ui>`__
-senlin                                 `git://git.openstack.org/openstack/senlin <https://git.openstack.org/cgit/openstack/senlin>`__
-slogging                               `git://git.openstack.org/openstack/slogging <https://git.openstack.org/cgit/openstack/slogging>`__
-solum                                  `git://git.openstack.org/openstack/solum <https://git.openstack.org/cgit/openstack/solum>`__
-stackube                               `git://git.openstack.org/openstack/stackube <https://git.openstack.org/cgit/openstack/stackube>`__
-storlets                               `git://git.openstack.org/openstack/storlets <https://git.openstack.org/cgit/openstack/storlets>`__
-stx-config                             `git://git.openstack.org/openstack/stx-config <https://git.openstack.org/cgit/openstack/stx-config>`__
-stx-fault                              `git://git.openstack.org/openstack/stx-fault <https://git.openstack.org/cgit/openstack/stx-fault>`__
-stx-update                             `git://git.openstack.org/openstack/stx-update <https://git.openstack.org/cgit/openstack/stx-update>`__
-tacker                                 `git://git.openstack.org/openstack/tacker <https://git.openstack.org/cgit/openstack/tacker>`__
-tap-as-a-service                       `git://git.openstack.org/openstack/tap-as-a-service <https://git.openstack.org/cgit/openstack/tap-as-a-service>`__
-tap-as-a-service-dashboard             `git://git.openstack.org/openstack/tap-as-a-service-dashboard <https://git.openstack.org/cgit/openstack/tap-as-a-service-dashboard>`__
-tatu                                   `git://git.openstack.org/openstack/tatu <https://git.openstack.org/cgit/openstack/tatu>`__
-telemetry-tempest-plugin               `git://git.openstack.org/openstack/telemetry-tempest-plugin <https://git.openstack.org/cgit/openstack/telemetry-tempest-plugin>`__
-tricircle                              `git://git.openstack.org/openstack/tricircle <https://git.openstack.org/cgit/openstack/tricircle>`__
-trio2o                                 `git://git.openstack.org/openstack/trio2o <https://git.openstack.org/cgit/openstack/trio2o>`__
-trove                                  `git://git.openstack.org/openstack/trove <https://git.openstack.org/cgit/openstack/trove>`__
-trove-dashboard                        `git://git.openstack.org/openstack/trove-dashboard <https://git.openstack.org/cgit/openstack/trove-dashboard>`__
-valet                                  `git://git.openstack.org/openstack/valet <https://git.openstack.org/cgit/openstack/valet>`__
-vitrage                                `git://git.openstack.org/openstack/vitrage <https://git.openstack.org/cgit/openstack/vitrage>`__
-vitrage-dashboard                      `git://git.openstack.org/openstack/vitrage-dashboard <https://git.openstack.org/cgit/openstack/vitrage-dashboard>`__
-vitrage-tempest-plugin                 `git://git.openstack.org/openstack/vitrage-tempest-plugin <https://git.openstack.org/cgit/openstack/vitrage-tempest-plugin>`__
-vmware-nsx                             `git://git.openstack.org/openstack/vmware-nsx <https://git.openstack.org/cgit/openstack/vmware-nsx>`__
-vmware-vspc                            `git://git.openstack.org/openstack/vmware-vspc <https://git.openstack.org/cgit/openstack/vmware-vspc>`__
-watcher                                `git://git.openstack.org/openstack/watcher <https://git.openstack.org/cgit/openstack/watcher>`__
-watcher-dashboard                      `git://git.openstack.org/openstack/watcher-dashboard <https://git.openstack.org/cgit/openstack/watcher-dashboard>`__
-zaqar                                  `git://git.openstack.org/openstack/zaqar <https://git.openstack.org/cgit/openstack/zaqar>`__
-zaqar-ui                               `git://git.openstack.org/openstack/zaqar-ui <https://git.openstack.org/cgit/openstack/zaqar-ui>`__
-zun                                    `git://git.openstack.org/openstack/zun <https://git.openstack.org/cgit/openstack/zun>`__
-zun-ui                                 `git://git.openstack.org/openstack/zun-ui <https://git.openstack.org/cgit/openstack/zun-ui>`__
-====================================== ===
+======================================== ===
+Plugin Name                              URL
+======================================== ===
+openstack/aodh                           `https://opendev.org/openstack/aodh <https://opendev.org/openstack/aodh>`__
+openstack/barbican                       `https://opendev.org/openstack/barbican <https://opendev.org/openstack/barbican>`__
+openstack/blazar                         `https://opendev.org/openstack/blazar <https://opendev.org/openstack/blazar>`__
+openstack/ceilometer                     `https://opendev.org/openstack/ceilometer <https://opendev.org/openstack/ceilometer>`__
+openstack/ceilometer-powervm             `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
+openstack/cinderlib                      `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
+openstack/cloudkitty                     `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
+openstack/congress                       `https://opendev.org/openstack/congress <https://opendev.org/openstack/congress>`__
+openstack/cyborg                         `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
+openstack/designate                      `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
+openstack/devstack-plugin-amqp1          `https://opendev.org/openstack/devstack-plugin-amqp1 <https://opendev.org/openstack/devstack-plugin-amqp1>`__
+openstack/devstack-plugin-ceph           `https://opendev.org/openstack/devstack-plugin-ceph <https://opendev.org/openstack/devstack-plugin-ceph>`__
+openstack/devstack-plugin-container      `https://opendev.org/openstack/devstack-plugin-container <https://opendev.org/openstack/devstack-plugin-container>`__
+openstack/devstack-plugin-kafka          `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
+openstack/devstack-plugin-pika           `https://opendev.org/openstack/devstack-plugin-pika <https://opendev.org/openstack/devstack-plugin-pika>`__
+openstack/devstack-plugin-zmq            `https://opendev.org/openstack/devstack-plugin-zmq <https://opendev.org/openstack/devstack-plugin-zmq>`__
+openstack/dragonflow                     `https://opendev.org/openstack/dragonflow <https://opendev.org/openstack/dragonflow>`__
+openstack/ec2-api                        `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
+openstack/freezer                        `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
+openstack/freezer-api                    `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
+openstack/freezer-tempest-plugin         `https://opendev.org/openstack/freezer-tempest-plugin <https://opendev.org/openstack/freezer-tempest-plugin>`__
+openstack/freezer-web-ui                 `https://opendev.org/openstack/freezer-web-ui <https://opendev.org/openstack/freezer-web-ui>`__
+openstack/heat                           `https://opendev.org/openstack/heat <https://opendev.org/openstack/heat>`__
+openstack/heat-dashboard                 `https://opendev.org/openstack/heat-dashboard <https://opendev.org/openstack/heat-dashboard>`__
+openstack/ironic                         `https://opendev.org/openstack/ironic <https://opendev.org/openstack/ironic>`__
+openstack/ironic-inspector               `https://opendev.org/openstack/ironic-inspector <https://opendev.org/openstack/ironic-inspector>`__
+openstack/ironic-ui                      `https://opendev.org/openstack/ironic-ui <https://opendev.org/openstack/ironic-ui>`__
+openstack/karbor                         `https://opendev.org/openstack/karbor <https://opendev.org/openstack/karbor>`__
+openstack/karbor-dashboard               `https://opendev.org/openstack/karbor-dashboard <https://opendev.org/openstack/karbor-dashboard>`__
+openstack/keystone                       `https://opendev.org/openstack/keystone <https://opendev.org/openstack/keystone>`__
+openstack/kuryr-kubernetes               `https://opendev.org/openstack/kuryr-kubernetes <https://opendev.org/openstack/kuryr-kubernetes>`__
+openstack/kuryr-libnetwork               `https://opendev.org/openstack/kuryr-libnetwork <https://opendev.org/openstack/kuryr-libnetwork>`__
+openstack/kuryr-tempest-plugin           `https://opendev.org/openstack/kuryr-tempest-plugin <https://opendev.org/openstack/kuryr-tempest-plugin>`__
+openstack/magnum                         `https://opendev.org/openstack/magnum <https://opendev.org/openstack/magnum>`__
+openstack/magnum-ui                      `https://opendev.org/openstack/magnum-ui <https://opendev.org/openstack/magnum-ui>`__
+openstack/manila                         `https://opendev.org/openstack/manila <https://opendev.org/openstack/manila>`__
+openstack/manila-tempest-plugin          `https://opendev.org/openstack/manila-tempest-plugin <https://opendev.org/openstack/manila-tempest-plugin>`__
+openstack/manila-ui                      `https://opendev.org/openstack/manila-ui <https://opendev.org/openstack/manila-ui>`__
+openstack/masakari                       `https://opendev.org/openstack/masakari <https://opendev.org/openstack/masakari>`__
+openstack/mistral                        `https://opendev.org/openstack/mistral <https://opendev.org/openstack/mistral>`__
+openstack/monasca-analytics              `https://opendev.org/openstack/monasca-analytics <https://opendev.org/openstack/monasca-analytics>`__
+openstack/monasca-api                    `https://opendev.org/openstack/monasca-api <https://opendev.org/openstack/monasca-api>`__
+openstack/monasca-ceilometer             `https://opendev.org/openstack/monasca-ceilometer <https://opendev.org/openstack/monasca-ceilometer>`__
+openstack/monasca-events-api             `https://opendev.org/openstack/monasca-events-api <https://opendev.org/openstack/monasca-events-api>`__
+openstack/monasca-log-api                `https://opendev.org/openstack/monasca-log-api <https://opendev.org/openstack/monasca-log-api>`__
+openstack/monasca-tempest-plugin         `https://opendev.org/openstack/monasca-tempest-plugin <https://opendev.org/openstack/monasca-tempest-plugin>`__
+openstack/monasca-transform              `https://opendev.org/openstack/monasca-transform <https://opendev.org/openstack/monasca-transform>`__
+openstack/murano                         `https://opendev.org/openstack/murano <https://opendev.org/openstack/murano>`__
+openstack/networking-bagpipe             `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
+openstack/networking-baremetal           `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
+openstack/networking-bgpvpn              `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
+openstack/networking-calico              `https://opendev.org/openstack/networking-calico <https://opendev.org/openstack/networking-calico>`__
+openstack/networking-generic-switch      `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
+openstack/networking-hyperv              `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
+openstack/networking-l2gw                `https://opendev.org/openstack/networking-l2gw <https://opendev.org/openstack/networking-l2gw>`__
+openstack/networking-midonet             `https://opendev.org/openstack/networking-midonet <https://opendev.org/openstack/networking-midonet>`__
+openstack/networking-odl                 `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
+openstack/networking-onos                `https://opendev.org/openstack/networking-onos <https://opendev.org/openstack/networking-onos>`__
+openstack/networking-ovn                 `https://opendev.org/openstack/networking-ovn <https://opendev.org/openstack/networking-ovn>`__
+openstack/networking-powervm             `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
+openstack/networking-sfc                 `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
+openstack/neutron                        `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
+openstack/neutron-dynamic-routing        `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
+openstack/neutron-fwaas                  `https://opendev.org/openstack/neutron-fwaas <https://opendev.org/openstack/neutron-fwaas>`__
+openstack/neutron-fwaas-dashboard        `https://opendev.org/openstack/neutron-fwaas-dashboard <https://opendev.org/openstack/neutron-fwaas-dashboard>`__
+openstack/neutron-tempest-plugin         `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
+openstack/neutron-vpnaas                 `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
+openstack/neutron-vpnaas-dashboard       `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
+openstack/nova-powervm                   `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
+openstack/octavia                        `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
+openstack/octavia-dashboard              `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
+openstack/openstacksdk                   `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
+openstack/os-loganalyze                  `https://opendev.org/openstack/os-loganalyze <https://opendev.org/openstack/os-loganalyze>`__
+openstack/osprofiler                     `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
+openstack/oswin-tempest-plugin           `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
+openstack/panko                          `https://opendev.org/openstack/panko <https://opendev.org/openstack/panko>`__
+openstack/patrole                        `https://opendev.org/openstack/patrole <https://opendev.org/openstack/patrole>`__
+openstack/qinling                        `https://opendev.org/openstack/qinling <https://opendev.org/openstack/qinling>`__
+openstack/qinling-dashboard              `https://opendev.org/openstack/qinling-dashboard <https://opendev.org/openstack/qinling-dashboard>`__
+openstack/rally                          `https://opendev.org/openstack/rally <https://opendev.org/openstack/rally>`__
+openstack/rally-openstack                `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
+openstack/sahara                         `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
+openstack/sahara-dashboard               `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
+openstack/searchlight                    `https://opendev.org/openstack/searchlight <https://opendev.org/openstack/searchlight>`__
+openstack/searchlight-ui                 `https://opendev.org/openstack/searchlight-ui <https://opendev.org/openstack/searchlight-ui>`__
+openstack/senlin                         `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
+openstack/shade                          `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
+openstack/solum                          `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
+openstack/storlets                       `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
+openstack/tacker                         `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
+openstack/telemetry-tempest-plugin       `https://opendev.org/openstack/telemetry-tempest-plugin <https://opendev.org/openstack/telemetry-tempest-plugin>`__
+openstack/tricircle                      `https://opendev.org/openstack/tricircle <https://opendev.org/openstack/tricircle>`__
+openstack/trove                          `https://opendev.org/openstack/trove <https://opendev.org/openstack/trove>`__
+openstack/trove-dashboard                `https://opendev.org/openstack/trove-dashboard <https://opendev.org/openstack/trove-dashboard>`__
+openstack/vitrage                        `https://opendev.org/openstack/vitrage <https://opendev.org/openstack/vitrage>`__
+openstack/vitrage-dashboard              `https://opendev.org/openstack/vitrage-dashboard <https://opendev.org/openstack/vitrage-dashboard>`__
+openstack/vitrage-tempest-plugin         `https://opendev.org/openstack/vitrage-tempest-plugin <https://opendev.org/openstack/vitrage-tempest-plugin>`__
+openstack/watcher                        `https://opendev.org/openstack/watcher <https://opendev.org/openstack/watcher>`__
+openstack/watcher-dashboard              `https://opendev.org/openstack/watcher-dashboard <https://opendev.org/openstack/watcher-dashboard>`__
+openstack/zaqar                          `https://opendev.org/openstack/zaqar <https://opendev.org/openstack/zaqar>`__
+openstack/zaqar-ui                       `https://opendev.org/openstack/zaqar-ui <https://opendev.org/openstack/zaqar-ui>`__
+openstack/zun                            `https://opendev.org/openstack/zun <https://opendev.org/openstack/zun>`__
+openstack/zun-ui                         `https://opendev.org/openstack/zun-ui <https://opendev.org/openstack/zun-ui>`__
+performa/os-faults                       `https://opendev.org/performa/os-faults <https://opendev.org/performa/os-faults>`__
+starlingx/config                         `https://opendev.org/starlingx/config <https://opendev.org/starlingx/config>`__
+starlingx/fault                          `https://opendev.org/starlingx/fault <https://opendev.org/starlingx/fault>`__
+starlingx/ha                             `https://opendev.org/starlingx/ha <https://opendev.org/starlingx/ha>`__
+starlingx/integ                          `https://opendev.org/starlingx/integ <https://opendev.org/starlingx/integ>`__
+starlingx/metal                          `https://opendev.org/starlingx/metal <https://opendev.org/starlingx/metal>`__
+starlingx/nfv                            `https://opendev.org/starlingx/nfv <https://opendev.org/starlingx/nfv>`__
+starlingx/update                         `https://opendev.org/starlingx/update <https://opendev.org/starlingx/update>`__
+x/almanach                               `https://opendev.org/x/almanach <https://opendev.org/x/almanach>`__
+x/apmec                                  `https://opendev.org/x/apmec <https://opendev.org/x/apmec>`__
+x/bilean                                 `https://opendev.org/x/bilean <https://opendev.org/x/bilean>`__
+x/broadview-collector                    `https://opendev.org/x/broadview-collector <https://opendev.org/x/broadview-collector>`__
+x/collectd-openstack-plugins             `https://opendev.org/x/collectd-openstack-plugins <https://opendev.org/x/collectd-openstack-plugins>`__
+x/devstack-plugin-additional-pkg-repos   `https://opendev.org/x/devstack-plugin-additional-pkg-repos <https://opendev.org/x/devstack-plugin-additional-pkg-repos>`__
+x/devstack-plugin-bdd                    `https://opendev.org/x/devstack-plugin-bdd <https://opendev.org/x/devstack-plugin-bdd>`__
+x/devstack-plugin-glusterfs              `https://opendev.org/x/devstack-plugin-glusterfs <https://opendev.org/x/devstack-plugin-glusterfs>`__
+x/devstack-plugin-hdfs                   `https://opendev.org/x/devstack-plugin-hdfs <https://opendev.org/x/devstack-plugin-hdfs>`__
+x/devstack-plugin-libvirt-qemu           `https://opendev.org/x/devstack-plugin-libvirt-qemu <https://opendev.org/x/devstack-plugin-libvirt-qemu>`__
+x/devstack-plugin-mariadb                `https://opendev.org/x/devstack-plugin-mariadb <https://opendev.org/x/devstack-plugin-mariadb>`__
+x/devstack-plugin-nfs                    `https://opendev.org/x/devstack-plugin-nfs <https://opendev.org/x/devstack-plugin-nfs>`__
+x/devstack-plugin-sheepdog               `https://opendev.org/x/devstack-plugin-sheepdog <https://opendev.org/x/devstack-plugin-sheepdog>`__
+x/devstack-plugin-vmax                   `https://opendev.org/x/devstack-plugin-vmax <https://opendev.org/x/devstack-plugin-vmax>`__
+x/drbd-devstack                          `https://opendev.org/x/drbd-devstack <https://opendev.org/x/drbd-devstack>`__
+x/fenix                                  `https://opendev.org/x/fenix <https://opendev.org/x/fenix>`__
+x/gce-api                                `https://opendev.org/x/gce-api <https://opendev.org/x/gce-api>`__
+x/glare                                  `https://opendev.org/x/glare <https://opendev.org/x/glare>`__
+x/group-based-policy                     `https://opendev.org/x/group-based-policy <https://opendev.org/x/group-based-policy>`__
+x/gyan                                   `https://opendev.org/x/gyan <https://opendev.org/x/gyan>`__
+x/horizon-mellanox                       `https://opendev.org/x/horizon-mellanox <https://opendev.org/x/horizon-mellanox>`__
+x/ironic-staging-drivers                 `https://opendev.org/x/ironic-staging-drivers <https://opendev.org/x/ironic-staging-drivers>`__
+x/kingbird                               `https://opendev.org/x/kingbird <https://opendev.org/x/kingbird>`__
+x/meteos                                 `https://opendev.org/x/meteos <https://opendev.org/x/meteos>`__
+x/meteos-ui                              `https://opendev.org/x/meteos-ui <https://opendev.org/x/meteos-ui>`__
+x/mixmatch                               `https://opendev.org/x/mixmatch <https://opendev.org/x/mixmatch>`__
+x/mogan                                  `https://opendev.org/x/mogan <https://opendev.org/x/mogan>`__
+x/mogan-ui                               `https://opendev.org/x/mogan-ui <https://opendev.org/x/mogan-ui>`__
+x/networking-6wind                       `https://opendev.org/x/networking-6wind <https://opendev.org/x/networking-6wind>`__
+x/networking-ansible                     `https://opendev.org/x/networking-ansible <https://opendev.org/x/networking-ansible>`__
+x/networking-arista                      `https://opendev.org/x/networking-arista <https://opendev.org/x/networking-arista>`__
+x/networking-brocade                     `https://opendev.org/x/networking-brocade <https://opendev.org/x/networking-brocade>`__
+x/networking-cisco                       `https://opendev.org/x/networking-cisco <https://opendev.org/x/networking-cisco>`__
+x/networking-cumulus                     `https://opendev.org/x/networking-cumulus <https://opendev.org/x/networking-cumulus>`__
+x/networking-dpm                         `https://opendev.org/x/networking-dpm <https://opendev.org/x/networking-dpm>`__
+x/networking-fortinet                    `https://opendev.org/x/networking-fortinet <https://opendev.org/x/networking-fortinet>`__
+x/networking-hpe                         `https://opendev.org/x/networking-hpe <https://opendev.org/x/networking-hpe>`__
+x/networking-huawei                      `https://opendev.org/x/networking-huawei <https://opendev.org/x/networking-huawei>`__
+x/networking-infoblox                    `https://opendev.org/x/networking-infoblox <https://opendev.org/x/networking-infoblox>`__
+x/networking-lagopus                     `https://opendev.org/x/networking-lagopus <https://opendev.org/x/networking-lagopus>`__
+x/networking-mlnx                        `https://opendev.org/x/networking-mlnx <https://opendev.org/x/networking-mlnx>`__
+x/networking-nec                         `https://opendev.org/x/networking-nec <https://opendev.org/x/networking-nec>`__
+x/networking-omnipath                    `https://opendev.org/x/networking-omnipath <https://opendev.org/x/networking-omnipath>`__
+x/networking-opencontrail                `https://opendev.org/x/networking-opencontrail <https://opendev.org/x/networking-opencontrail>`__
+x/networking-ovs-dpdk                    `https://opendev.org/x/networking-ovs-dpdk <https://opendev.org/x/networking-ovs-dpdk>`__
+x/networking-plumgrid                    `https://opendev.org/x/networking-plumgrid <https://opendev.org/x/networking-plumgrid>`__
+x/networking-spp                         `https://opendev.org/x/networking-spp <https://opendev.org/x/networking-spp>`__
+x/networking-vpp                         `https://opendev.org/x/networking-vpp <https://opendev.org/x/networking-vpp>`__
+x/networking-vsphere                     `https://opendev.org/x/networking-vsphere <https://opendev.org/x/networking-vsphere>`__
+x/neutron-classifier                     `https://opendev.org/x/neutron-classifier <https://opendev.org/x/neutron-classifier>`__
+x/nova-dpm                               `https://opendev.org/x/nova-dpm <https://opendev.org/x/nova-dpm>`__
+x/nova-mksproxy                          `https://opendev.org/x/nova-mksproxy <https://opendev.org/x/nova-mksproxy>`__
+x/oaktree                                `https://opendev.org/x/oaktree <https://opendev.org/x/oaktree>`__
+x/omni                                   `https://opendev.org/x/omni <https://opendev.org/x/omni>`__
+x/os-xenapi                              `https://opendev.org/x/os-xenapi <https://opendev.org/x/os-xenapi>`__
+x/picasso                                `https://opendev.org/x/picasso <https://opendev.org/x/picasso>`__
+x/rsd-virt-for-nova                      `https://opendev.org/x/rsd-virt-for-nova <https://opendev.org/x/rsd-virt-for-nova>`__
+x/scalpels                               `https://opendev.org/x/scalpels <https://opendev.org/x/scalpels>`__
+x/slogging                               `https://opendev.org/x/slogging <https://opendev.org/x/slogging>`__
+x/stackube                               `https://opendev.org/x/stackube <https://opendev.org/x/stackube>`__
+x/tap-as-a-service                       `https://opendev.org/x/tap-as-a-service <https://opendev.org/x/tap-as-a-service>`__
+x/tap-as-a-service-dashboard             `https://opendev.org/x/tap-as-a-service-dashboard <https://opendev.org/x/tap-as-a-service-dashboard>`__
+x/tatu                                   `https://opendev.org/x/tatu <https://opendev.org/x/tatu>`__
+x/tobiko                                 `https://opendev.org/x/tobiko <https://opendev.org/x/tobiko>`__
+x/trio2o                                 `https://opendev.org/x/trio2o <https://opendev.org/x/trio2o>`__
+x/valet                                  `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
+x/vmware-nsx                             `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
+x/vmware-vspc                            `https://opendev.org/x/vmware-vspc <https://opendev.org/x/vmware-vspc>`__
+======================================== ===
 
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 89b9381..a18a786 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -58,7 +58,7 @@
   plugin's name, which is the name that should be used by users on
   "enable_plugin" lines.  It should generally be the last component of
   the git repo path (e.g., if the plugin's repo is
-  openstack/devstack-foo, then the name here should be "foo") ::
+  openstack/foo, then the name here should be "foo") ::
 
     define_plugin <YOUR PLUGIN>
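+
+  For instance, taking the hypothetical ``openstack/foo`` repository
+  from the example above, the settings entry would simply be::
+
+   define_plugin foo
+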
 
@@ -99,7 +99,7 @@
 
 An example would be as follows::
 
-  enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api
+  enable_plugin ec2-api https://opendev.org/openstack/ec2-api
 
 plugin.sh contract
 ==================
@@ -148,7 +148,7 @@
 
 ``devstack/settings``::
 
-    # settings file for template
+  # settings file for template
   enable_service template
 
 
@@ -222,14 +222,20 @@
 System Packages
 ===============
 
-Devstack provides a framework for getting packages installed at an early
-phase of its execution. These packages may be defined in a plugin as files
-that contain new-line separated lists of packages required by the plugin
 
-Supported packaging systems include apt and yum across multiple distributions.
-To enable a plugin to hook into this and install package dependencies, packages
-may be listed at the following locations in the top-level of the plugin
-repository:
+
+DevStack based
+--------------
+
+DevStack provides a custom framework for getting packages installed at
+an early phase of its execution.  These packages may be defined in a
+plugin as files that contain newline-separated lists of packages
+required by the plugin.
+
+Supported packaging systems include apt and yum across multiple
+distributions.  To enable a plugin to hook into this and install
+package dependencies, packages may be listed at the following
+locations in the top-level of the plugin repository:
 
 - ``./devstack/files/debs/$plugin_name`` - Packages to install when running
   on Ubuntu, Debian or Linux Mint.
@@ -240,6 +246,42 @@
 - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
   running on SUSE Linux or openSUSE.
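+
+As a minimal sketch (the plugin name ``foo`` and the packages listed
+are only examples), a plugin that needs ``jq`` and ``socat`` on
+Debian-family systems could ship a ``./devstack/files/debs/foo`` file
+containing one package name per line:
+
+.. code-block:: text
+
+   jq
+   socat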
 
+Although there are no plans to remove this method of installing
+packages, plugins should consider it deprecated in favor of the
+``bindep`` support described below.
+
+bindep
+------
+
+The `bindep <https://docs.openstack.org/infra/bindep>`__ project has
+become the de facto standard for OpenStack projects to specify binary
+dependencies.
+
+A plugin may provide a ``./devstack/files/bindep.txt`` file;
+``bindep`` will be run against it with the *default* profile to
+install packages.  For details on the syntax, see the bindep
+documentation.
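+
+As an illustration only (the package names below are assumptions, not
+requirements of any particular plugin), a ``bindep.txt`` typically maps
+each dependency to the packaging systems it applies to:
+
+.. code-block:: text
+
+   # installed via apt on Debian/Ubuntu
+   libffi-dev [platform:dpkg]
+   # installed via yum/dnf/zypper on RPM-based distributions
+   libffi-devel [platform:rpm]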
+
+It is also possible to use the ``bindep.txt`` of projects that are
+being installed from source with the ``-bindep`` flag available in
+install functions.  For example:
+
+.. code-block:: bash
+
+  if use_library_from_git "diskimage-builder"; then
+     GITREPO["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_URL
+     GITDIR["diskimage-builder"]=$DEST/diskimage-builder
+     GITBRANCH["diskimage-builder"]=$DISKIMAGE_BUILDER_REPO_REF
+     git_clone_by_name "diskimage-builder"
+     setup_dev_lib -bindep "diskimage-builder"
+  fi
+
+will result in any packages required by the ``bindep.txt`` of the
+``diskimage-builder`` project being installed.  Note, however, that jobs
+that switch projects between source and released/pypi installs
+(e.g. with a ``foo-dsvm`` and a ``foo-dsvm-src`` test to cover both
+released dependencies and master versions) will have to deal with
+``bindep.txt`` being unavailable without the source directory.
+
 
 Using Plugins in the OpenStack Gate
 ===================================
@@ -264,10 +306,12 @@
 the best practice is to build a dedicated
 ``openstack/devstack-plugin-FOO`` project.
 
+Legacy project-config jobs
+--------------------------
+
 To enable a plugin to be used in a gate job, the following lines will
 be needed in your ``jenkins/jobs/<project>.yaml`` definition in
-`project-config
-<http://git.openstack.org/cgit/openstack-infra/project-config/>`_::
+`project-config <https://opendev.org/openstack/project-config/>`_::
 
   # Because we are testing a non-standard project, add
   # our project repository. This makes zuul do the right
@@ -277,12 +321,17 @@
   # note the actual url here is somewhat irrelevant because it
   # caches in nodepool, however make it a valid url for
   # documentation purposes.
-  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api"
+  export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api https://opendev.org/openstack/ec2-api"
+
+Zuul v3 jobs
+------------
+
+See the ``devstack_plugins`` example in :doc:`zuul_ci_jobs_migration`.
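+
+As a rough sketch (the job name and plugin URL below are placeholders,
+not an existing job), a native Zuul v3 job enables a plugin by listing
+it in the ``devstack_plugins`` job variable:
+
+.. code-block:: yaml
+
+  - job:
+      name: foo-devstack-plugin
+      parent: devstack
+      vars:
+        devstack_plugins:
+          foo: https://opendev.org/openstack/foo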
 
 See Also
 ========
 
 For additional inspiration on devstack plugins you can check out the
-`Plugin Registry <plugin-registry.html>`_.
+:doc:`Plugin Registry <plugin-registry>`.
 
 .. _service types authority: https://specs.openstack.org/openstack/service-types-authority/
diff --git a/doc/source/systemd.rst b/doc/source/systemd.rst
index 1bc9911..15b3f75 100644
--- a/doc/source/systemd.rst
+++ b/doc/source/systemd.rst
@@ -194,7 +194,7 @@
 
 See the `remote-pdb`_ home page for more options.
 
-.. _`remote-pdb`: https://pypi.python.org/pypi/remote-pdb
+.. _`remote-pdb`: https://pypi.org/project/remote-pdb/
 
 Known Issues
 ============
diff --git a/doc/source/zuul_ci_jobs_migration.rst b/doc/source/zuul_ci_jobs_migration.rst
index c00f06e..17e7e16 100644
--- a/doc/source/zuul_ci_jobs_migration.rst
+++ b/doc/source/zuul_ci_jobs_migration.rst
@@ -28,7 +28,7 @@
 
 .. code:: yaml
 
-  # In http://git.openstack.org/cgit/openstack/sahara-tests/tree/.zuul.yaml:
+  # In https://opendev.org/openstack/sahara-tests/src/branch/master/.zuul.yaml:
   - job:
       name: sahara-tests-tempest
       description: |
@@ -86,7 +86,7 @@
 
 .. code:: yaml
 
-  # https://git.openstack.org/cgit/openstack/kuryr-kubernetes/tree/.zuul.yaml:
+  # https://opendev.org/openstack/kuryr-kubernetes/src/branch/master/.zuul.d/base.yaml:
   - job:
       name: kuryr-kubernetes-tempest-base
       parent: devstack-tempest
@@ -102,7 +102,6 @@
         tox_envlist: 'all'
         devstack_localrc:
           KURYR_K8S_API_PORT: 8080
-          TEMPEST_PLUGINS: '/opt/stack/kuryr-tempest-plugin'
         devstack_services:
           kubernetes-api: true
           kubernetes-controller-manager: true
@@ -111,9 +110,11 @@
           kuryr-kubernetes: true
           (...)
         devstack_plugins:
-          kuryr-kubernetes: https://git.openstack.org/openstack/kuryr
-          devstack-plugin-container: https://git.openstack.org/openstack/devstack-plugin-container
-          neutron-lbaas: https://git.openstack.org/openstack/neutron-lbaas
+          kuryr-kubernetes: https://opendev.org/openstack/kuryr
+          devstack-plugin-container: https://opendev.org/openstack/devstack-plugin-container
+          neutron-lbaas: https://opendev.org/openstack/neutron-lbaas
+        tempest_plugins:
+          - kuryr-tempest-plugin
         (...)
 
 Job variables
@@ -179,123 +180,147 @@
 devstack-gate defined a default value. In ansible jobs the default is either the
 value defined in the parent job, or the default from DevStack, if any.
 
-==============================================  ============= ==================
-DevStack gate flag                              Repo          New implementation
-==============================================  ============= ==================
-OVERRIDE_ZUUL_BRANCH                            zuul          override-checkout:
-                                                              [branch]
-                                                              in the job definition.
-DEVSTACK_GATE_NET_OVERLAY                       zuul-jobs     A bridge called
-                                                              br-infra is set up for
-                                                              all jobs that inherit
-                                                              from multinode with
-                                                              a dedicated `bridge role <https://docs.openstack.org/infra/zuul-jobs/roles.html#role-multi-node-bridge>`_.
-DEVSTACK_GATE_FEATURE_MATRIX                    devstack-gate ``test_matrix_features``
-                                                              variable of the
-                                                              test-matrix role in
-                                                              devstack-gate. This
-                                                              is a temporary
-                                                              solution, feature
-                                                              matrix will go away.
-                                                              In the future services
-                                                              will be defined in
-                                                              jobs only.
-DEVSTACK_CINDER_VOLUME_CLEAR                    devstack      *CINDER_VOLUME_CLEAR: true/false*
-                                                              in devstack_localrc
-                                                              in the job vars.
-DEVSTACK_GATE_NEUTRON                           devstack      True by default. To
-                                                              disable, disable all
-                                                              neutron services in
-                                                              devstack_services in
-                                                              the job definition.
-DEVSTACK_GATE_CONFIGDRIVE                       devstack      *FORCE_CONFIG_DRIVE: true/false*
-                                                              in devstack_localrc
-                                                              in the job vars.
-DEVSTACK_GATE_INSTALL_TESTONLY                  devstack      *INSTALL_TESTONLY_PACKAGES: true/false*
-                                                              in devstack_localrc
-                                                              in the job vars.
-DEVSTACK_GATE_VIRT_DRIVER                       devstack      *VIRT_DRIVER: [virt driver]*
-                                                              in devstack_localrc
-                                                              in the job vars.
-DEVSTACK_GATE_LIBVIRT_TYPE                      devstack      *LIBVIRT_TYPE: [libvirt type]*
-                                                              in devstack_localrc
-                                                              in the job vars.
-DEVSTACK_GATE_TEMPEST                           devstack      Defined by the job
-                                                tempest       that is used. The
-                                                              ``devstack`` job only
-                                                              runs devstack.
-                                                              The ``devstack-tempest``
-                                                              one triggers a Tempest
-                                                              run as well.
-DEVSTACK_GATE_TEMPEST_FULL                      tempest       *tox_envlist: full*
-                                                              in the job vars.
-DEVSTACK_GATE_TEMPEST_ALL                       tempest       *tox_envlist: all*
-                                                              in the job vars.
-DEVSTACK_GATE_TEMPEST_ALL_PLUGINS               tempest       *tox_envlist: all-plugin*
-                                                              in the job vars.
-DEVSTACK_GATE_TEMPEST_SCENARIOS                 tempest       *tox_envlist: scenario*
-                                                              in the job vars.
-TEMPEST_CONCURRENCY                             tempest       *tempest_concurrency: [value]*
-                                                              in the job vars. This
-                                                              is available only on
-                                                              jobs that inherit from
-                                                              ``devstack-tempest``
-                                                              down.
-DEVSTACK_GATE_TEMPEST_NOTESTS                   tempest       *tox_envlist: venv-tempest*
-                                                              in the job vars. This
-                                                              will create Tempest
-                                                              virtual environment
-                                                              but run no tests.
-DEVSTACK_GATE_SMOKE_SERIAL                      tempest       *tox_envlist: smoke-serial*
-                                                              in the job vars.
-DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION  tempest       *tox_envlist: full-serial*
-                                                              in the job vars.
-                                                              *TEMPEST_ALLOW_TENANT_ISOLATION: false*
-                                                              in devstack_localrc in
-                                                              the job vars.
-==============================================  ============= ==================
+.. list-table:: **DevStack Gate Flags**
+   :widths: 20 10 60
+   :header-rows: 1
+
+   * - DevStack gate flag
+     - Repo
+     - New implementation
+   * - OVERRIDE_ZUUL_BRANCH
+     - zuul
+     - override-checkout: [branch] in the job definition.
+   * - DEVSTACK_GATE_NET_OVERLAY
+     - zuul-jobs
+     - A bridge called br-infra is set up for all jobs that inherit
+       from multinode with a dedicated `bridge role
+       <https://zuul-ci.org/docs/zuul-jobs/general-roles.html#role-multi-node-bridge>`_.
+   * - DEVSTACK_GATE_FEATURE_MATRIX
+     - devstack-gate
+     - ``test_matrix_features`` variable of the test-matrix role in
+       devstack-gate. This is a temporary solution; the feature matrix
+       will go away. In the future, services will be defined in jobs
+       only.
+   * - DEVSTACK_CINDER_VOLUME_CLEAR
+     - devstack
+     - *CINDER_VOLUME_CLEAR: true/false* in devstack_localrc in the
+       job vars.
+   * - DEVSTACK_GATE_NEUTRON
+     - devstack
+     - True by default. To disable, disable all neutron services in
+       devstack_services in the job definition.
+   * - DEVSTACK_GATE_CONFIGDRIVE
+     - devstack
+     - *FORCE_CONFIG_DRIVE: true/false* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_INSTALL_TESTONLY
+     - devstack
+     - *INSTALL_TESTONLY_PACKAGES: true/false* in devstack_localrc in
+       the job vars.
+   * - DEVSTACK_GATE_VIRT_DRIVER
+     - devstack
+     - *VIRT_DRIVER: [virt driver]* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_LIBVIRT_TYPE
+     - devstack
+     - *LIBVIRT_TYPE: [libvirt type]* in devstack_localrc in the job
+       vars.
+   * - DEVSTACK_GATE_TEMPEST
+     - devstack and tempest
+     - Defined by the job that is used. The ``devstack`` job only runs
+       devstack. The ``devstack-tempest`` one triggers a Tempest run
+       as well.
+   * - DEVSTACK_GATE_TEMPEST_FULL
+     - tempest
+     - *tox_envlist: full* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_ALL
+     - tempest
+     - *tox_envlist: all* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_ALL_PLUGINS
+     - tempest
+     - *tox_envlist: all-plugin* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_SCENARIOS
+     - tempest
+     - *tox_envlist: scenario* in the job vars.
+   * - TEMPEST_CONCURRENCY
+     - tempest
+     - *tempest_concurrency: [value]* in the job vars. This is
+       available only on jobs that inherit from ``devstack-tempest``
+       down.
+   * - DEVSTACK_GATE_TEMPEST_NOTESTS
+     - tempest
+     - *tox_envlist: venv-tempest* in the job vars. This will create
+       the Tempest virtual environment but run no tests.
+   * - DEVSTACK_GATE_SMOKE_SERIAL
+     - tempest
+     - *tox_envlist: smoke-serial* in the job vars.
+   * - DEVSTACK_GATE_TEMPEST_DISABLE_TENANT_ISOLATION
+     - tempest
+     - *tox_envlist: full-serial* in the job vars.
+       *TEMPEST_ALLOW_TENANT_ISOLATION: false* in devstack_localrc in
+       the job vars.
+
 
 The following flags have not been migrated yet or are legacy and won't be
 migrated at all.
 
-=====================================  ======  ==========================
-DevStack gate flag                     Status  Details
-=====================================  ======  ==========================
-DEVSTACK_GATE_TOPOLOGY                 WIP     The topology depends on the base
-                                               job that is used and more
-                                               specifically on the nodeset
-                                               attached to it. The new job
-                                               format allows project to define
-                                               the variables to be passed to
-                                               every node/node-group that exists
-                                               in the topology. Named topologies
-                                               that include the nodeset and the
-                                               matching variables can be defined
-                                               in the form of base jobs.
-DEVSTACK_GATE_GRENADE                  TBD     Grenade Zuul V3 jobs will be
-                                               hosted in the grenade repo.
-GRENADE_BASE_BRANCH                    TBD     Grenade Zuul V3 jobs will be
-                                               hosted in the grenade repo.
-DEVSTACK_GATE_NEUTRON_DVR              TBD     Depends on multinode support.
-DEVSTACK_GATE_EXERCISES                TBD     Can be done on request.
-DEVSTACK_GATE_IRONIC                   TBD     This will probably be implemented
-                                               on ironic side.
-DEVSTACK_GATE_IRONIC_DRIVER            TBD     This will probably be implemented
-                                               on ironic side.
-DEVSTACK_GATE_IRONIC_BUILD_RAMDISK     TBD     This will probably be implemented
-                                               on ironic side.
-DEVSTACK_GATE_POSTGRES                 Legacy  This flag exists in d-g but the
-                                               only thing that it does is
-                                               capture postgres logs. This is
-                                               already supported by the roles in
-                                               post, so the flag is useless in
-                                               the new jobs. postgres itself can
-                                               be enabled via the
-                                               devstack_service job variable.
-DEVSTACK_GATE_ZEROMQ                   Legacy  This has no effect in d-g.
-DEVSTACK_GATE_MQ_DRIVER                Legacy  This has no effect in d-g.
-DEVSTACK_GATE_TEMPEST_STRESS_ARGS      Legacy  Stress is not in Tempest anymore.
-DEVSTACK_GATE_TEMPEST_HEAT_SLOW        Legacy  This is not used anywhere.
-DEVSTACK_GATE_CELLS                    Legacy  This has no effect in d-g.
-DEVSTACK_GATE_NOVA_API_METADATA_SPLIT  Legacy  This has no effect in d-g.
-=====================================  ======  ==========================
+.. list-table:: **Not Migrated DevStack Gate Flags**
+   :widths: 20 10 60
+   :header-rows: 1
+
+   * - DevStack gate flag
+     - Status
+     - Details
+   * - DEVSTACK_GATE_TOPOLOGY
+     - WIP
+     - The topology depends on the base job that is used and more
+       specifically on the nodeset attached to it. The new job format
+       allows projects to define the variables to be passed to every
+       node/node-group that exists in the topology. Named topologies
+       that include the nodeset and the matching variables can be
+       defined in the form of base jobs.
+   * - DEVSTACK_GATE_GRENADE
+     - TBD
+     - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+   * - GRENADE_BASE_BRANCH
+     - TBD
+     - Grenade Zuul V3 jobs will be hosted in the grenade repo.
+   * - DEVSTACK_GATE_NEUTRON_DVR
+     - TBD
+     - Depends on multinode support.
+   * - DEVSTACK_GATE_EXERCISES
+     - TBD
+     - Can be done on request.
+   * - DEVSTACK_GATE_IRONIC
+     - TBD
+     - This will probably be implemented on ironic side.
+   * - DEVSTACK_GATE_IRONIC_DRIVER
+     - TBD
+     - This will probably be implemented on ironic side.
+   * - DEVSTACK_GATE_IRONIC_BUILD_RAMDISK
+     - TBD
+     - This will probably be implemented on ironic side.
+   * - DEVSTACK_GATE_POSTGRES
+     - Legacy
+     - This flag exists in d-g, but the only thing it does is capture
+       postgres logs. This is already supported by the roles in post,
+       so the flag is useless in the new jobs. Postgres itself can be
+       enabled via the devstack_services job variable.
+   * - DEVSTACK_GATE_ZEROMQ
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_MQ_DRIVER
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_TEMPEST_STRESS_ARGS
+     - Legacy
+     - Stress is not in Tempest anymore.
+   * - DEVSTACK_GATE_TEMPEST_HEAT_SLOW
+     - Legacy
+     - This is not used anywhere.
+   * - DEVSTACK_GATE_CELLS
+     - Legacy
+     - This has no effect in d-g.
+   * - DEVSTACK_GATE_NOVA_API_METADATA_SPLIT
+     - Legacy
+     - This has no effect in d-g.
diff --git a/files/debs/neutron-common b/files/debs/neutron-common
index e30f678..b269f63 100644
--- a/files/debs/neutron-common
+++ b/files/debs/neutron-common
@@ -1,6 +1,6 @@
 acl
 dnsmasq-base
-dnsmasq-utils # for dhcp_release only available in dist:precise
+dnsmasq-utils # for dhcp_release
 ebtables
 haproxy # to serve as metadata proxy inside router/dhcp namespaces
 iptables
diff --git a/files/debs/nova b/files/debs/nova
index 5e14aec..e5110e9 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -10,9 +10,8 @@
 kpartx
 libjs-jquery-tablesorter # Needed for coverage html reports
 libmysqlclient-dev
-libvirt-bin # dist:xenial NOPRIME
-libvirt-clients # not:xenial NOPRIME
-libvirt-daemon-system # not:xenial NOPRIME
+libvirt-clients # NOPRIME
+libvirt-daemon-system # NOPRIME
 libvirt-dev # NOPRIME
 mysql-server # NOPRIME
 parted
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
index c11e9f0..9c724cb 100644
--- a/files/rpms-suse/n-cpu
+++ b/files/rpms-suse/n-cpu
@@ -1,8 +1,9 @@
+cdrkit-cdrtools-compat # dist:sle12
 cryptsetup
 dosfstools
 libosinfo
 lvm2
-mkisofs
+mkisofs # not:sle12
 open-iscsi
 sg3_utils
 # Stuff for diablo volumes
diff --git a/files/rpms-suse/n-novnc b/files/rpms-suse/n-novnc
deleted file mode 100644
index c8722b9..0000000
--- a/files/rpms-suse/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/rpms-suse/n-spice b/files/rpms-suse/n-spice
deleted file mode 100644
index c8722b9..0000000
--- a/files/rpms-suse/n-spice
+++ /dev/null
@@ -1 +0,0 @@
-python-numpy
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
index 4103a40..1d58121 100644
--- a/files/rpms-suse/nova
+++ b/files/rpms-suse/nova
@@ -1,3 +1,4 @@
+cdrkit-cdrtools-compat # dist:sle12
 conntrack-tools
 curl
 dnsmasq
@@ -11,7 +12,8 @@
 libvirt # NOPRIME
 libvirt-python # NOPRIME
 mariadb # NOPRIME
-mkisofs # required for config_drive
+# mkisofs is required for config_drive
+mkisofs # not:sle12
 parted
 polkit
 # qemu as fallback if kvm cannot be used
diff --git a/files/rpms/cinder b/files/rpms/cinder
index 058c235..e6b33dc 100644
--- a/files/rpms/cinder
+++ b/files/rpms/cinder
@@ -1,5 +1,5 @@
 iscsi-initiator-utils
 lvm2
 qemu-img
-scsi-target-utils # not:rhel7,f25,f26,f27,f28 NOPRIME
-targetcli # dist:rhel7,f25,f26,f27,f28 NOPRIME
+scsi-target-utils # not:rhel7,f25,f26,f27,f28,f29 NOPRIME
+targetcli # dist:rhel7,f25,f26,f27,f28,f29 NOPRIME
diff --git a/files/rpms/dstat b/files/rpms/dstat
index 0d9da44..b058c27 100644
--- a/files/rpms/dstat
+++ b/files/rpms/dstat
@@ -1,2 +1,3 @@
-dstat
+dstat # not:f29
+pcp-system-tools # dist:f29
 python-psutil
diff --git a/files/rpms/general b/files/rpms/general
index c7863e4..5bf1e9a 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -9,9 +9,9 @@
 graphviz # needed only for docs
 httpd
 httpd-devel
-iptables-services  # NOPRIME f25,f26,f27,f28
+iptables-services  # NOPRIME f25,f26,f27,f28,f29
 java-1.7.0-openjdk-headless  # NOPRIME rhel7
-java-1.8.0-openjdk-headless  # NOPRIME f25,f26,f27,f28
+java-1.8.0-openjdk-headless  # NOPRIME f25,f26,f27,f28,f29
 libffi-devel
 libjpeg-turbo-devel # Pillow 3.0.0
 libxml2-devel # lxml
diff --git a/files/rpms/n-novnc b/files/rpms/n-novnc
deleted file mode 100644
index 24ce15a..0000000
--- a/files/rpms/n-novnc
+++ /dev/null
@@ -1 +0,0 @@
-numpy
diff --git a/files/rpms/n-spice b/files/rpms/n-spice
deleted file mode 100644
index 24ce15a..0000000
--- a/files/rpms/n-spice
+++ /dev/null
@@ -1 +0,0 @@
-numpy
diff --git a/files/rpms/nova b/files/rpms/nova
index 4140cd7..639d793 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,19 +1,18 @@
 conntrack-tools
 curl
-dnsmasq # for nova-network
+dnsmasq # for q-dhcp
 dnsmasq-utils # for dhcp_release
 ebtables
 gawk
 genisoimage # required for config_drive
 iptables
 iputils
-kernel-modules # dist:f25,f26,f27,f28
+kernel-modules # dist:f25,f26,f27,f28,f29
 kpartx
 libxml2-python
 m2crypto
 mysql-devel
 mysql-server # NOPRIME
-numpy # needed by websockify for spice console
 parted
 polkit
 rabbitmq-server # NOPRIME
diff --git a/files/rpms/swift b/files/rpms/swift
index f2f5de6..be524d1 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -2,7 +2,7 @@
 liberasurecode-devel
 memcached
 pyxattr
-rsync-daemon # dist:f25,f26,f27,f28
+rsync-daemon # dist:f25,f26,f27,f28,f29
 sqlite
 xfsprogs
 xinetd
diff --git a/functions b/functions
index 051c816..8ea634e 100644
--- a/functions
+++ b/functions
@@ -18,6 +18,7 @@
 FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd)
 source ${FUNC_DIR}/functions-common
 source ${FUNC_DIR}/inc/ini-config
+source ${FUNC_DIR}/inc/meta-config
 source ${FUNC_DIR}/inc/python
 source ${FUNC_DIR}/inc/rootwrap
 
@@ -291,7 +292,7 @@
     local disk_format=""
     local container_format=""
     local unpack=""
-    local img_property=""
+    local img_property="--property hw_rng_model=virtio"
     case "$image_fname" in
         *.tar.gz|*.tgz)
             # Extract ami and aki files
@@ -363,11 +364,11 @@
     esac
 
     if is_arch "ppc64le" || is_arch "ppc64" || is_arch "ppc"; then
-        img_property="--property hw_cdrom_bus=scsi --property os_command_line=console=hvc0"
+        img_property="$img_property --property hw_cdrom_bus=scsi --property os_command_line=console=hvc0"
     fi
 
     if is_arch "aarch64"; then
-        img_property="--property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'"
+        img_property="$img_property --property hw_machine_type=virt --property hw_cdrom_bus=scsi --property hw_scsi_model=virtio-scsi --property os_command_line='console=ttyAMA0'"
     fi
 
     if [ "$container_format" = "bare" ]; then
@@ -469,7 +470,7 @@
 
 
 # ping check
-# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK``
+# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``PRIVATE_NETWORK``
 # ping_check <ip> [boot-timeout] [from_net] [expected]
 function ping_check {
     local ip=$1
@@ -483,12 +484,9 @@
     # if we don't specify a from_net we're expecting things to work
     # fine from our local box.
     if [[ -n "$from_net" ]]; then
+        # TODO(stephenfin): Is there any way neutron could be disabled now?
         if is_service_enabled neutron; then
             ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net"
-        elif [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then
-            # there is no way to address the multihost / private case, bail here for compatibility.
-            # TODO: remove this cruft and redo code to handle this at the caller level.
-            return
         fi
     fi
 
@@ -739,7 +737,7 @@
 
     # Mount the disk with mount options to make it as efficient as possible
     if ! egrep -q ${storage_data_dir} /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+        sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8  \
             ${disk_image} ${storage_data_dir}
     fi
 }
diff --git a/functions-common b/functions-common
index af95bfb..6be07b4 100644
--- a/functions-common
+++ b/functions-common
@@ -27,7 +27,6 @@
 # - ``RECLONE``
 # - ``REQUIREMENTS_DIR``
 # - ``STACK_USER``
-# - ``TRACK_DEPENDS``
 # - ``http_proxy``, ``https_proxy``, ``no_proxy``
 #
 
@@ -44,7 +43,6 @@
 declare -A -g GITBRANCH
 declare -A -g GITDIR
 
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
 KILL_PATH="$(which kill)"
 
 # Save these variables to .stackenv
@@ -92,7 +90,6 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
         --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username demo \
@@ -104,7 +101,6 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack-alt \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
         --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username alt_demo \
@@ -116,13 +112,23 @@
         --file $CLOUDS_YAML \
         --os-cloud devstack-admin \
         --os-region-name $REGION_NAME \
-        --os-identity-api-version 3 \
         $CA_CERT_ARG \
         --os-auth-url $KEYSTONE_SERVICE_URI \
         --os-username admin \
         --os-password $ADMIN_PASSWORD \
         --os-project-name admin
 
+    # admin with a system-scoped token -> devstack-system-admin
+    $PYTHON $TOP_DIR/tools/update_clouds_yaml.py \
+        --file $CLOUDS_YAML \
+        --os-cloud devstack-system-admin \
+        --os-region-name $REGION_NAME \
+        $CA_CERT_ARG \
+        --os-auth-url $KEYSTONE_SERVICE_URI \
+        --os-username admin \
+        --os-password $ADMIN_PASSWORD \
+        --os-system-scope all
+
     # Clean up any old clouds.yaml files we had lying around
     rm -f $(eval echo ~"$STACK_USER")/.config/openstack/clouds.yaml
 }
@@ -371,14 +377,14 @@
     elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
         # For Fedora, just use 'f' and the release
         DISTRO="f$os_RELEASE"
-    elif [[ "$os_VENDOR" =~ (openSUSE) ]]; then
+    elif is_opensuse; then
         DISTRO="opensuse-$os_RELEASE"
         # Tumbleweed uses "n/a" as a codename, and the release is a datestring
         # like 20180218, so not very useful. Leap however uses a release
         # with a "dot", so for example 15.0
         [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \
             DISTRO="opensuse-tumbleweed"
-    elif [[ "$os_VENDOR" =~ (SUSE LINUX) ]]; then
+    elif is_suse_linux_enterprise; then
         # just use major release
         DISTRO="sle${os_RELEASE%.*}"
     elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
@@ -452,11 +458,30 @@
 # (openSUSE, SLE).
 # is_suse
 function is_suse {
+    is_opensuse || is_suse_linux_enterprise
+}
+
+
+# Determine if current distribution is an openSUSE distribution
+# is_opensuse
+function is_opensuse {
     if [[ -z "$os_VENDOR" ]]; then
         GetOSVersion
     fi
 
-    [[ "$os_VENDOR" =~ (openSUSE) || "$os_VENDOR" == "SUSE LINUX" ]]
+    [[ "$os_VENDOR" =~ (openSUSE) ]]
+}
+
+
+# Determine if current distribution is a SUSE Linux Enterprise (SLE)
+# distribution
+# is_suse_linux_enterprise
+function is_suse_linux_enterprise {
+    if [[ -z "$os_VENDOR" ]]; then
+        GetOSVersion
+    fi
+
+    [[ "$os_VENDOR" =~ (^SUSE) ]]
 }
 
 
@@ -1248,6 +1273,30 @@
     $xtrace
 }
 
+# Search plugins for a bindep.txt file
+#
+# Uses globals ``BINDEP_CMD``, ``GITDIR``, ``DEVSTACK_PLUGINS``
+#
+# Note this is only valid after BINDEP_CMD is set up in stack.sh, and
+# is thus not really intended to be called externally.
+function _get_plugin_bindep_packages {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
+    local bindep_file
+    local packages
+
+    for plugin in ${DEVSTACK_PLUGINS//,/ }; do
+        bindep_file=${GITDIR[$plugin]}/devstack/files/bindep.txt
+        if [[ -f ${bindep_file} ]]; then
+            packages+=$($BINDEP_CMD -b --file ${bindep_file} || true)
+        fi
+    done
+    echo "${packages}"
+    $xtrace
+}
+
 # Distro-agnostic package installer
 # Uses globals ``NO_UPDATE_REPOS``, ``REPOS_UPDATED``, ``RETRY_UPDATE``
 # install_package package [package ...]
@@ -1381,6 +1430,35 @@
         zypper --non-interactive install --auto-agree-with-licenses --no-recommends "$@"
 }
 
+# Run bindep and install packages it outputs
+#
+# Usage:
+#  install_bindep <path-to-bindep.txt> [profile,profile]
+#
+# Note unlike the bindep command itself, profile(s) specified should
+# be a single, comma-separated string, no spaces.
+function install_bindep {
+    local file=$1
+    local profiles=${2:-""}
+    local pkgs
+
+    if [[ ! -f $file ]]; then
+        die $LINENO "Can not find bindep file: $file"
+    fi
+
+    # convert the comma-separated profile list into space-separated
+    # arguments, which makes it much easier to pass along
+    profiles=${profiles//,/ }
+
+    # Note bindep returns 1 when packages need to be installed, so we
+    # have to ignore its return for "-e"
+    pkgs=$($DEST/bindep-venv/bin/bindep -b --file $file $profiles || true)
+
+    if [[ -n "${pkgs}" ]]; then
+        install_package ${pkgs}
+    fi
+}
+
 function write_user_unit_file {
     local service=$1
     local command="$2"
@@ -1873,10 +1951,6 @@
 #   For backward compatibility if we have **swift** in ENABLED_SERVICES all the
 #   **s-** services will be enabled. This will be deprecated in the future.
 #
-# Cells within nova is enabled if **n-cell** is in ``ENABLED_SERVICES``.
-# We also need to make sure to treat **n-cell-region** and **n-cell-child**
-# as enabled in this case.
-#
 # Uses global ``ENABLED_SERVICES``
 # is_service_enabled service [service ...]
 function is_service_enabled {
@@ -1899,7 +1973,6 @@
         # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled()
         #                are implemented
 
-        [[ ${service} == n-cell-* && ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && enabled=0
         [[ ${service} == n-cpu-* && ,${ENABLED_SERVICES} =~ ,"n-cpu" ]] && enabled=0
         [[ ${service} == "nova" && ,${ENABLED_SERVICES} =~ ,"n-" ]] && enabled=0
         [[ ${service} == "glance" && ,${ENABLED_SERVICES} =~ ,"g-" ]] && enabled=0
@@ -2000,11 +2073,7 @@
         return 0
     fi
 
-    if [[ $TRACK_DEPENDS = True ]]; then
-        sudo_cmd="env"
-    else
-        sudo_cmd="sudo"
-    fi
+    sudo_cmd="sudo"
 
     $xtrace
     $sudo_cmd $@
@@ -2279,6 +2348,10 @@
 }
 
 function oscwrap {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+
     local out
     local rc
     local start
@@ -2293,6 +2366,7 @@
     echo $((end - start)) >> $OSCWRAP_TIMER_FILE
 
     echo "$out"
+    $xtrace
     return $rc
 }
 
diff --git a/inc/ini-config b/inc/ini-config
index 6fe7788..7993682 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -88,17 +88,22 @@
 }
 
 # Determinate is the given option present in the INI file
-# ini_has_option config-file section option
+# ini_has_option [-sudo] config-file section option
 function ini_has_option {
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
+    local sudo=""
+    if [ $1 == "-sudo" ]; then
+        sudo="sudo "
+        shift
+    fi
     local file=$1
     local section=$2
     local option=$3
     local line
 
-    line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
+    line=$($sudo sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file")
     $xtrace
     [ -n "$line" ]
 }
@@ -173,8 +178,10 @@
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
     local sudo=""
+    local sudo_option=""
     if [ $1 == "-sudo" ]; then
         sudo="sudo "
+        sudo_option="-sudo "
         shift
     fi
     local file=$1
@@ -187,11 +194,11 @@
         return
     fi
 
-    if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then
+    if ! $sudo grep -q "^\[$section\]" "$file" 2>/dev/null; then
         # Add section at the end
         echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
     fi
-    if ! ini_has_option "$file" "$section" "$option"; then
+    if ! ini_has_option $sudo_option "$file" "$section" "$option"; then
         # Add it
         $sudo sed -i -e "/^\[$section\]/ a\\
 $option = $value
@@ -228,7 +235,7 @@
         # the reverse order. Do a reverse here to keep the original order.
         values="$v ${values}"
     done
-    if ! grep -q "^\[$section\]" "$file"; then
+    if ! $sudo grep -q "^\[$section\]" "$file"; then
         # Add section at the end
         echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null
     else
diff --git a/inc/python b/inc/python
index d8b8169..fd43cef 100644
--- a/inc/python
+++ b/inc/python
@@ -29,6 +29,10 @@
 # get_pip_command
 function get_pip_command {
     local version="$1"
+    if [ -z "$version" ]; then
+        die $LINENO "pip python version is not set."
+    fi
+
     # NOTE(dhellmann): I don't know if we actually get a pip3.4-python
     # under any circumstances.
     which pip${version} || which pip${version}-python
@@ -49,15 +53,9 @@
     fi
     $xtrace
 
-    if python3_enabled && [[ "$os_VENDOR" == "Fedora" && $os_RELEASE -gt 26 ]]; then
-        # Default Python 3 install prefix changed to /usr/local in Fedora 27:
-        # https://fedoraproject.org/wiki/Changes/Making_sudo_pip_safe
-        echo "/usr/local/bin"
-    elif is_fedora || is_suse; then
-        echo "/usr/bin"
-    else
-        echo "/usr/local/bin"
-    fi
+    local PYTHON_PATH=/usr/local/bin
+    ( is_fedora && ! python3_enabled ) || is_suse && PYTHON_PATH=/usr/bin
+    echo $PYTHON_PATH
 }
 
 # Wrapper for ``pip install`` that only installs versions of libraries
@@ -87,41 +85,12 @@
     pip_install $clean_name[$extras]
 }
 
-# Determine the python versions supported by a package
-function get_python_versions_for_package {
-    local name=$1
-    cd $name && python setup.py --classifiers \
-        | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' '
-}
-
-# Check for python3 classifier in local directory
-function check_python3_support_for_package_local {
-    local name=$1
-    cd $name
-    set +e
-    classifier=$(python setup.py --classifiers \
-        | grep 'Programming Language :: Python :: 3$')
-    set -e
-    echo $classifier
-}
-
-# Check for python3 classifier on pypi
-function check_python3_support_for_package_remote {
-    local name=$1
-    set +e
-    classifier=$(curl -s -L "https://pypi.python.org/pypi/$name/json" \
-        | grep '"Programming Language :: Python :: 3"')
-    set -e
-    echo $classifier
-}
-
-# python3_enabled_for() checks if the service(s) specified as arguments are
-# enabled by the user in ``ENABLED_PYTHON3_PACKAGES``.
+# python3_enabled_for() assumes the service(s) specified as arguments are
+# enabled for python 3 unless explicitly disabled. See python3_disabled_for().
 #
 # Multiple services specified as arguments are ``OR``'ed together; the test
 # is a short-circuit boolean, i.e it returns on the first match.
 #
-# Uses global ``ENABLED_PYTHON3_PACKAGES``
 # python3_enabled_for dir [dir ...]
 function python3_enabled_for {
     local xtrace
@@ -132,7 +101,9 @@
     local dirs=$@
     local dir
     for dir in ${dirs}; do
-        [[ ,${ENABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]] && enabled=0
+        if ! python3_disabled_for "${dir}"; then
+            enabled=0
+        fi
     done
 
     $xtrace
@@ -163,42 +134,29 @@
     return $enabled
 }
 
-# enable_python3_package() adds the repositories passed as argument to the
-# ``ENABLED_PYTHON3_PACKAGES`` list, if they are not already present.
+# enable_python3_package() -- no-op for backwards compatibility
 #
 # For example:
 #   enable_python3_package nova
 #
-# Uses global ``ENABLED_PYTHON3_PACKAGES``
 # enable_python3_package dir [dir ...]
 function enable_python3_package {
     local xtrace
     xtrace=$(set +o | grep xtrace)
     set +o xtrace
 
-    local tmpsvcs="${ENABLED_PYTHON3_PACKAGES}"
-    local python3
-    for dir in $@; do
-        if [[ ,${DISABLED_PYTHON3_PACKAGES}, =~ ,${dir}, ]]; then
-            warn $LINENO "Attempt to enable_python3_package ${dir} when it has been disabled"
-            continue
-        fi
-        if ! python3_enabled_for $dir; then
-            tmpsvcs+=",$dir"
-        fi
-    done
-    ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$tmpsvcs")
+    echo "It is no longer necessary to call enable_python3_package()."
 
     $xtrace
 }
 
-# disable_python3_package() prepares the services passed as argument to be
-# removed from the ``ENABLED_PYTHON3_PACKAGES`` list, if they are present.
+# disable_python3_package() adds the services passed as argument to
+# the ``DISABLED_PYTHON3_PACKAGES`` list.
 #
 # For example:
 #   disable_python3_package swift
 #
-# Uses globals ``ENABLED_PYTHON3_PACKAGES`` and ``DISABLED_PYTHON3_PACKAGES``
+# Uses global ``DISABLED_PYTHON3_PACKAGES``
 # disable_python3_package dir [dir ...]
 function disable_python3_package {
     local xtrace
@@ -206,23 +164,18 @@
     set +o xtrace
 
     local disabled_svcs="${DISABLED_PYTHON3_PACKAGES}"
-    local enabled_svcs=",${ENABLED_PYTHON3_PACKAGES},"
     local dir
     for dir in $@; do
         disabled_svcs+=",$dir"
-        if python3_enabled_for $dir; then
-            enabled_svcs=${enabled_svcs//,$dir,/,}
-        fi
     done
     DISABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$disabled_svcs")
-    ENABLED_PYTHON3_PACKAGES=$(_cleanup_service_list "$enabled_svcs")
 
     $xtrace
 }
 
 # Wrapper for ``pip install`` to set cache and proxy environment variables
 # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``,
-# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``,
+# ``PIP_UPGRADE``, ``*_proxy``,
 # Usage:
 #  pip_install pip_arguments
 function pip_install {
@@ -266,65 +219,28 @@
     # this works (for now...)
     local package_dir=${!#%\[*\]}
 
-    if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then
-        # TRACK_DEPENDS=True installation creates a circular dependency when
-        # we attempt to install virtualenv into a virtualenv, so we must global
-        # that installation.
-        source $DEST/.venv/bin/activate
-        local cmd_pip=$DEST/.venv/bin/pip
+    if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
+        local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
         local sudo_pip="env"
     else
-        if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
-            local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
-            local sudo_pip="env"
-        else
-            local cmd_pip
-            cmd_pip=$(get_pip_command $PYTHON2_VERSION)
-            local sudo_pip="sudo -H"
-            if python3_enabled; then
-                # Look at the package classifiers to find the python
-                # versions supported, and if we find the version of
-                # python3 we've been told to use, use that instead of the
-                # default pip
-                local python_versions
-
-                # Special case some services that have experimental
-                # support for python3 in progress, but don't claim support
-                # in their classifier
-                echo "Check python version for : $package_dir"
-                if python3_disabled_for ${package_dir##*/}; then
-                    echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES"
-                elif python3_enabled_for ${package_dir##*/}; then
-                    echo "Explicitly using $PYTHON3_VERSION version to install $package_dir based on ENABLED_PYTHON3_PACKAGES"
-                    sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                    cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                elif [[ -d "$package_dir" ]]; then
-                    python_versions=$(get_python_versions_for_package $package_dir)
-                    if [[ $python_versions =~ $PYTHON3_VERSION ]]; then
-                        echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on classifiers"
-                        sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                        cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                    else
-                        # The package may not have yet advertised python3.5
-                        # support so check for just python3 classifier and log
-                        # a warning.
-                        python3_classifier=$(check_python3_support_for_package_local $package_dir)
-                        if [[ ! -z "$python3_classifier" ]]; then
-                            echo "Automatically using $PYTHON3_VERSION version to install $package_dir based on local package settings"
-                            sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                            cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                        fi
-                    fi
-                else
-                    # Check pypi as we don't have the package on disk
-                    package=$(echo $package_dir | grep -o '^[.a-zA-Z0-9_-]*')
-                    python3_classifier=$(check_python3_support_for_package_remote $package)
-                    if [[ ! -z "$python3_classifier" ]]; then
-                        echo "Automatically using $PYTHON3_VERSION version to install $package based on remote package settings"
-                        sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
-                        cmd_pip=$(get_pip_command $PYTHON3_VERSION)
-                    fi
-                fi
+        local cmd_pip
+        cmd_pip=$(get_pip_command $PYTHON2_VERSION)
+        local sudo_pip="sudo -H"
+        if python3_enabled; then
+            # Special case some services that have experimental
+            # support for python3 in progress, but don't claim support
+            # in their classifier
+            echo "Check python version for : $package_dir"
+            if python3_disabled_for ${package_dir##*/}; then
+                echo "Explicitly using $PYTHON2_VERSION version to install $package_dir based on DISABLED_PYTHON3_PACKAGES"
+            else
+                # For everything that is not explicitly blacklisted with
+                # DISABLED_PYTHON3_PACKAGES, assume it supports python3
+                # and we will let pip sort out the install, regardless of
+                # the package being local or remote.
+                echo "Using $PYTHON3_VERSION version to install $package_dir based on default behavior"
+                sudo_pip="$sudo_pip LC_ALL=en_US.UTF-8"
+                cmd_pip=$(get_pip_command $PYTHON3_VERSION)
             fi
         fi
     fi
@@ -421,7 +337,7 @@
     # The best option seems to be to use "pip list" which will tell
     # you the path an editable install was installed from; for example
     # in response to something like
-    #  pip install -e 'git+http://git.openstack.org/openstack-dev/bashate#egg=bashate'
+    #  pip install -e 'git+https://opendev.org/openstack/bashate#egg=bashate'
     # pip list --format columns shows
     #  bashate 0.5.2.dev19 /tmp/env/src/bashate
     # Thus we check the third column to see if we're installed from
@@ -445,20 +361,17 @@
 # another project.
 #
 # use this for non namespaced libraries
+#
+# setup_dev_lib [-bindep] <name>
 function setup_dev_lib {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local name=$1
     local dir=${GITDIR[$name]}
-    if python3_enabled; then
-        # Turn off Python 3 mode and install the package again,
-        # forcing a Python 2 installation. This ensures that all libs
-        # being used for development are installed under both versions
-        # of Python.
-        echo "Installing $name again without Python 3 enabled"
-        USE_PYTHON3=False
-        setup_develop $dir
-        USE_PYTHON3=True
-    fi
-    setup_develop $dir
+    setup_develop $bindep $dir
 }
 
 # this should be used if you want to install globally, all libraries should
@@ -469,11 +382,17 @@
 # extras: comma-separated list of optional dependencies to install
 #         (e.g., ldap,memcache).
 #         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
+# bindep: Set "-bindep" as first argument to install bindep.txt packages
 # The command is like "pip install <project_dir>[<extras>]"
 function setup_install {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local project_dir=$1
     local extras=$2
-    _setup_package_with_constraints_edit $project_dir "" $extras
+    _setup_package_with_constraints_edit $bindep $project_dir "" $extras
 }
 
 # this should be used for projects which run services, like all services
@@ -485,20 +404,14 @@
 #         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
 # The command is like "pip install -e <project_dir>[<extras>]"
 function setup_develop {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local project_dir=$1
     local extras=$2
-    _setup_package_with_constraints_edit $project_dir -e $extras
-}
-
-# determine if a project as specified by directory is in
-# projects.txt. This will not be an exact match because we throw away
-# the namespacing when we clone, but it should be good enough in all
-# practical ways.
-function is_in_projects_txt {
-    local project_dir=$1
-    local project_name
-    project_name=$(basename $project_dir)
-    grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt
+    _setup_package_with_constraints_edit $bindep $project_dir -e $extras
 }
 
 # ``pip install -e`` the package, which processes the dependencies
@@ -517,6 +430,11 @@
 #         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
 # The command is like "pip install <flags> <project_dir>[<extras>]"
 function _setup_package_with_constraints_edit {
+    local bindep
+    if [[ $1 == -bindep* ]]; then
+        bindep="${1}"
+        shift
+    fi
     local project_dir=$1
     local flags=$2
     local extras=$3
@@ -537,7 +455,7 @@
             "$flags file://$project_dir#egg=$name"
     fi
 
-    setup_package $project_dir "$flags" $extras
+    setup_package $bindep $project_dir "$flags" $extras
 
     # If this project is in LIBS_FROM_GIT, verify it was actually installed
     # correctly.  This helps catch errors caused by constraints mismatches.
@@ -549,17 +467,30 @@
 }
 
 # ``pip install -e`` the package, which processes the dependencies
-# using pip before running `setup.py develop`
+# using pip before running `setup.py develop`.  The command is like
+# "pip install <flags> <project_dir>[<extras>]"
 #
 # Uses globals ``STACK_USER``
-# setup_package project_dir [flags] [extras]
-# project_dir: directory of project repo (e.g., /opt/stack/keystone)
-# flags: pip CLI options/flags
-# extras: comma-separated list of optional dependencies to install
-#         (e.g., ldap,memcache).
-#         See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
-# The command is like "pip install <flags> <project_dir>[<extras>]"
+#
+# Usage:
+#  setup_package [-bindep[=profile,profile]] <project_dir> <flags> [extras]
+#
+# -bindep     : Use bindep to install dependencies; select extra profiles
+#               as comma separated arguments after "="
+# project_dir : directory of project repo (e.g., /opt/stack/keystone)
+# flags       : pip CLI options/flags
+# extras      : comma-separated list of optional dependencies to install
+#               (e.g., ldap,memcache).
+#               See https://docs.openstack.org/pbr/latest/user/using.html#extra-requirements
 function setup_package {
+    local bindep=0
+    local bindep_flag=""
+    local bindep_profiles=""
+    if [[ $1 == -bindep* ]]; then
+        bindep=1
+        IFS="=" read bindep_flag bindep_profiles <<< ${1}
+        shift
+    fi
     local project_dir=$1
     local flags=$2
     local extras=$3
@@ -575,6 +506,11 @@
         extras="[$extras]"
     fi
 
+    # install any bindep packages
+    if [[ $bindep == 1 ]]; then
+        install_bindep $project_dir/bindep.txt $bindep_profiles
+    fi
+
     pip_install $flags "$project_dir$extras"
     # ensure that further actions can do things like setup.py sdist
     if [[ "$flags" == "-e" ]]; then
diff --git a/lib/cinder b/lib/cinder
index 76bf928..fd96053 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -51,7 +51,6 @@
 fi
 
 CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder}
-CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder}
 
 CINDER_CONF_DIR=/etc/cinder
 CINDER_CONF=$CINDER_CONF_DIR/cinder.conf
@@ -88,14 +87,6 @@
 CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
 CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
 
-# Cinder reports allocations back to the scheduler on periodic intervals
-# it turns out we can get an "out of space" issue when we run tests too
-# quickly just because cinder didn't realize we'd freed up resources.
-# Make this configurable so that devstack-gate/tempest can set it to
-# less than the 60 second default
-# https://bugs.launchpad.net/cinder/+bug/1180976
-CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60}
-
 # Centos7 and OpenSUSE switched to using LIO and that's all that's supported,
 # although the tgt bits are in EPEL and OpenSUSE we don't want that for CI
 if is_fedora || is_suse; then
@@ -225,7 +216,7 @@
     inicomment $CINDER_API_PASTE_INI filter:authtoken admin_password
     inicomment $CINDER_API_PASTE_INI filter:authtoken signing_dir
 
-    configure_auth_token_middleware $CINDER_CONF cinder $CINDER_AUTH_CACHE_DIR
+    configure_keystone_authtoken_middleware $CINDER_CONF cinder
 
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
@@ -237,9 +228,11 @@
     iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
-    iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL
-    iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
-
+    if [[ $SERVICE_IP_VERSION == 6 ]]; then
+        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6"
+    else
+        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
+    fi
     iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
     iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
 
@@ -323,7 +316,7 @@
     fi
 
     # Set nova credentials (used for os-assisted-snapshots)
-    configure_auth_token_middleware $CINDER_CONF nova $CINDER_AUTH_CACHE_DIR nova
+    configure_keystone_authtoken_middleware $CINDER_CONF nova nova
     iniset $CINDER_CONF nova region_name "$REGION_NAME"
     iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
@@ -349,18 +342,12 @@
 
         # block-storage is the official service type
         get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
-        get_or_create_service "cinder" "volume" "Cinder Volume Service"
         if [ "$CINDER_USE_MOD_WSGI" == "False" ]; then
             get_or_create_endpoint \
                 "block-storage" \
                 "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/\$(project_id)s"
 
-            get_or_create_endpoint \
-                "volume" \
-                "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(project_id)s"
-
             get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
             get_or_create_endpoint \
                 "volumev2" \
@@ -378,11 +365,6 @@
                 "$REGION_NAME" \
                 "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v3/\$(project_id)s"
 
-            get_or_create_endpoint \
-                "volume" \
-                "$REGION_NAME" \
-                "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST/volume/v1/\$(project_id)s"
-
             get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2"
             get_or_create_endpoint \
                 "volumev2" \
@@ -400,13 +382,6 @@
     fi
 }
 
-# create_cinder_cache_dir() - Part of the init_cinder() process
-function create_cinder_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR
-    rm -f $CINDER_AUTH_CACHE_DIR/*
-}
-
 # init_cinder() - Initialize database and volume group
 function init_cinder {
     if is_service_enabled $DATABASE_BACKENDS; then
@@ -435,7 +410,6 @@
     fi
 
     mkdir -p $CINDER_STATE_PATH/volumes
-    create_cinder_cache_dir
 }
 
 # install_cinder() - Collect source and prepare
@@ -445,7 +419,14 @@
     if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
         install_package tgt
     elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
-        install_package targetcli
+        if [[ ${DISTRO} == "bionic" ]]; then
+            # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
+            sudo mkdir -p /etc/target
+
+            install_package targetcli-fb
+        else
+            install_package targetcli
+        fi
     fi
 }
 
diff --git a/lib/database b/lib/database
index 0d72052..7940cf2 100644
--- a/lib/database
+++ b/lib/database
@@ -87,8 +87,6 @@
 
     if [ -n "$MYSQL_PASSWORD" ]; then
         DATABASE_PASSWORD=$MYSQL_PASSWORD
-    else
-        read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
     fi
 
     # We configure Nova, Horizon, Glance and Keystone to use MySQL as their
diff --git a/lib/databases/mysql b/lib/databases/mysql
index ac0c083..420a86e 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -15,9 +15,17 @@
 
 register_database mysql
 
-MYSQL_SERVICE_NAME=mysql
-if is_suse || is_fedora && ! is_oraclelinux; then
-    MYSQL_SERVICE_NAME=mariadb
+if [[ -z "$MYSQL_SERVICE_NAME" ]]; then
+    MYSQL_SERVICE_NAME=mysql
+    if is_fedora && ! is_oraclelinux; then
+        MYSQL_SERVICE_NAME=mariadb
+    elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
+        # Older mariadb packages on SLES 12 provided mysql.service.  The
+        # newer ones on SLES 12 and 15 use mariadb.service; they also
+        # provide a mysql.service symlink for backwards-compatibility, but
+        # let's not rely on that.
+        MYSQL_SERVICE_NAME=mariadb
+    fi
 fi
 
 # Functions
@@ -86,8 +94,23 @@
     # because the package might have been installed already.
     sudo mysqladmin -u root password $DATABASE_PASSWORD || true
 
+    # In the case of MariaDB, giving the hostname in the arguments causes
+    # permission problems, as it expects connections through the socket
+    if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+        local cmd_args="-uroot -p$DATABASE_PASSWORD "
+    else
+        local cmd_args="-uroot -p$DATABASE_PASSWORD -h127.0.0.1 "
+    fi
+
+    # With MariaDB (e.g. on Ubuntu) the socket plugin is used for root
+    # authentication, so it only works via sudo. To restore the old
+    # "mysql like" behaviour, we need to change the auth plugin for the
+    # root user
+    if is_ubuntu && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+        sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
+        sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+    fi
     # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
-    sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+    sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
 
     # Now update ``my.cnf`` for some local needs and restart the mysql service
 
@@ -142,8 +165,11 @@
 [client]
 user=$DATABASE_USER
 password=$DATABASE_PASSWORD
-host=$MYSQL_HOST
 EOF
+
+        if ! is_ubuntu || [ "$MYSQL_SERVICE_NAME" != "mariadb" ]; then
+            echo "host=$MYSQL_HOST" >> $HOME/.my.cnf
+        fi
         chmod 0600 $HOME/.my.cnf
     fi
     # Install mysql-server
@@ -153,7 +179,7 @@
         install_package mariadb-server
         sudo systemctl enable $MYSQL_SERVICE_NAME
     elif is_ubuntu; then
-        install_package mysql-server
+        install_package $MYSQL_SERVICE_NAME-server
     else
         exit_distro_not_supported "mysql installation"
     fi
diff --git a/lib/etcd3 b/lib/etcd3
index 26d07fd..4f3a7a4 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -27,6 +27,10 @@
 ETCD_DATA_DIR="$DATA_DIR/etcd"
 ETCD_SYSTEMD_SERVICE="devstack@etcd.service"
 ETCD_BIN_DIR="$DEST/bin"
+# The option below mounts ETCD_DATA_DIR as a ramdisk, which is useful for
+# running etcd-heavy services in the gate VMs, e.g. Kubernetes.
+ETCD_USE_RAMDISK=$(trueorfalse True ETCD_USE_RAMDISK)
+ETCD_RAMDISK_MB=${ETCD_RAMDISK_MB:-512}
 
 if is_ubuntu ; then
     UBUNTU_RELEASE_BASE_NUM=`lsb_release -r | awk '{print $2}' | cut -d '.' -f 1`
@@ -46,6 +50,9 @@
         cmd+=" --listen-peer-urls http://0.0.0.0:$ETCD_PEER_PORT "
     fi
     cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
+    if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
+        cmd+=" --debug"
+    fi
 
     local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
     write_user_unit_file $ETCD_SYSTEMD_SERVICE "$cmd" "" "root"
@@ -86,6 +93,9 @@
 
     $SYSTEMCTL daemon-reload
 
+    if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+        sudo umount $ETCD_DATA_DIR
+    fi
     sudo rm -rf $ETCD_DATA_DIR
 }
 
@@ -95,6 +105,9 @@
     # Create the necessary directories
     sudo mkdir -p $ETCD_BIN_DIR
     sudo mkdir -p $ETCD_DATA_DIR
+    if [[ "$ETCD_USE_RAMDISK" == "True" ]]; then
+        sudo mount -t tmpfs -o nodev,nosuid,size=${ETCD_RAMDISK_MB}M tmpfs $ETCD_DATA_DIR
+    fi
 
     # Download and cache the etcd tgz for subsequent use
     local etcd_file
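
For reference, a minimal local.conf sketch (illustrative only) exercising the
new ramdisk knobs introduced above, e.g. to grow the tmpfs beyond the 512 MiB
default:

    [[local|localrc]]
    ETCD_USE_RAMDISK=True
    ETCD_RAMDISK_MB=1024
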
diff --git a/lib/glance b/lib/glance
index 94f6a22..b4cab2a 100644
--- a/lib/glance
+++ b/lib/glance
@@ -44,7 +44,6 @@
 GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
 GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images}
 GLANCE_LOCK_DIR=${GLANCE_LOCK_DIR:=$DATA_DIR/glance/locks}
-GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance}
 
 GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
 GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
@@ -53,7 +52,6 @@
 GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini
 GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini
 GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf
-GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json
 GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json
 GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf
 GLANCE_IMAGE_IMPORT_CONF=$GLANCE_CONF_DIR/glance-image-import.conf
@@ -97,20 +95,14 @@
 # cleanup_glance() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_glance {
-    # kill instances (nova)
     # delete image files (glance)
-    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR
+    sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR
 }
 
 # configure_glance() - Set config files, create data dirs, etc
 function configure_glance {
     sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR
 
-    # We run this here as this configures cache dirs for the auth middleware
-    # which is used in the api server and not in the registry. The api
-    # Server is configured through this function and not init_glance.
-    create_glance_cache_dir
-
     # Set non-default configuration options for registry
     iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
@@ -120,7 +112,7 @@
     iniset $GLANCE_REGISTRY_CONF database connection $dburl
     iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone
-    configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry
+    configure_keystone_authtoken_middleware $GLANCE_REGISTRY_CONF glance
     iniset $GLANCE_REGISTRY_CONF oslo_messaging_notifications driver messagingv2
     iniset_rpc_backend glance $GLANCE_REGISTRY_CONF
     iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
@@ -130,9 +122,9 @@
     iniset $GLANCE_API_CONF database connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
-    iniset $GLANCE_API_CONF DEFAULT lock_path $GLANCE_LOCK_DIR
+    iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
-    configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api
+    configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
     iniset $GLANCE_API_CONF oslo_messaging_notifications driver messagingv2
     iniset_rpc_backend glance $GLANCE_API_CONF
     if [ "$VIRT_DRIVER" = 'xenserver' ]; then
@@ -227,7 +219,6 @@
     iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties ignore_user_roles admin
     iniset $GLANCE_IMAGE_IMPORT_CONF inject_metadata_properties inject
 
-    cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON
     cp -p $GLANCE_DIR/etc/schema-image.json $GLANCE_SCHEMA_JSON
 
     cp -p $GLANCE_DIR/etc/metadefs/*.json $GLANCE_METADEF_DIR
@@ -236,8 +227,8 @@
         CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
         CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
 
-        iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
-        iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s"
+        iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
+        iniset $GLANCE_CACHE_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v3/%(project_id)s"
     fi
 
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
@@ -279,33 +270,24 @@
     fi
 }
 
-# create_glance_cache_dir() - Part of the configure_glance() process
-function create_glance_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search $GLANCE_AUTH_CACHE_DIR/artifact
-    rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* $GLANCE_AUTH_CACHE_DIR/artifact/*
-}
-
 # init_glance() - Initialize databases, etc.
 function init_glance {
     # Delete existing images
     rm -rf $GLANCE_IMAGE_DIR
     mkdir -p $GLANCE_IMAGE_DIR
 
-    # Delete existing cache
-    rm -rf $GLANCE_CACHE_DIR
-    mkdir -p $GLANCE_CACHE_DIR
+    if is_service_enabled $DATABASE_BACKENDS; then
+        # (Re)create glance database
+        recreate_database glance
 
-    # (Re)create glance database
-    recreate_database glance
+        time_start "dbsync"
+        # Migrate glance database
+        $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync
 
-    time_start "dbsync"
-    # Migrate glance database
-    $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_sync
-
-    # Load metadata definitions
-    $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs
-    time_stop "dbsync"
+        # Load metadata definitions
+        $GLANCE_BIN_DIR/glance-manage --config-file $GLANCE_CONF_DIR/glance-api.conf db_load_metadefs
+        time_stop "dbsync"
+    fi
 }
 
 # install_glanceclient() - Collect source and prepare
@@ -345,7 +327,7 @@
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
         run_process g-api "$GLANCE_BIN_DIR/uwsgi --procname-prefix glance-api --ini $GLANCE_UWSGI_CONF"
     else
-        run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf"
+        run_process g-api "$GLANCE_BIN_DIR/glance-api --config-dir=$GLANCE_CONF_DIR"
     fi
 
     echo "Waiting for g-api ($GLANCE_SERVICE_HOST) to start..."
diff --git a/lib/horizon b/lib/horizon
index 293a627..b2bf7bc 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -43,8 +43,8 @@
     local value=$4
 
     if [ -z "$section" ]; then
-        sed -e "/^$option/d" -i $local_settings
-        echo -e "\n$option=$value" >> $file
+        sed -e "/^$option/d" -i $file
+        echo "$option = $value" >> $file
     elif grep -q "^$section" $file; then
         local line
         line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file)
@@ -84,6 +84,9 @@
     local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py
     cp $HORIZON_SETTINGS $local_settings
 
+    # Ensure the local_settings.py file ends with an EOL (newline)
+    echo >> $local_settings
+
     _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\"
 
     _horizon_config_set $local_settings "" COMPRESS_OFFLINE True
@@ -91,7 +94,6 @@
 
     _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\"
 
-    _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3}
     _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_URI}/v3\""
 
     # note(trebskit): if HOST_IP points at non-localhost ip address, horizon cannot be accessed
diff --git a/lib/infra b/lib/infra
index cf003cc..b983f2b 100644
--- a/lib/infra
+++ b/lib/infra
@@ -29,7 +29,7 @@
 # install_infra() - Collect source and prepare
 function install_infra {
     local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
-    [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV
+    [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
     # We don't care about testing git pbr in the requirements venv.
     PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
     PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
diff --git a/lib/keystone b/lib/keystone
index 02e2822..9ceb829 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -131,6 +131,9 @@
 # however may not be suitable for real production.
 KEYSTONE_PASSWORD_HASH_ROUNDS=${KEYSTONE_PASSWORD_HASH_ROUNDS:-4}
 
+# Cache settings
+KEYSTONE_ENABLE_CACHE=${KEYSTONE_ENABLE_CACHE:-True}
+
 # Functions
 # ---------
 
@@ -213,9 +216,9 @@
     iniset $KEYSTONE_CONF resource driver "$KEYSTONE_RESOURCE_BACKEND"
 
     # Enable caching
-    iniset $KEYSTONE_CONF cache enabled "True"
-    iniset $KEYSTONE_CONF cache backend "dogpile.cache.memcached"
-    iniset $KEYSTONE_CONF cache memcache_servers localhost:11211
+    iniset $KEYSTONE_CONF cache enabled $KEYSTONE_ENABLE_CACHE
+    iniset $KEYSTONE_CONF cache backend $CACHE_BACKEND
+    iniset $KEYSTONE_CONF cache memcache_servers $MEMCACHE_SERVERS
 
     iniset_rpc_backend keystone $KEYSTONE_CONF oslo_messaging_notifications
 
@@ -397,18 +400,17 @@
     fi
 }
 
-# Configure the service to use the auth token middleware.
+# Configure a service to use the auth token middleware.
 #
-# configure_auth_token_middleware conf_file admin_user signing_dir [section]
+# configure_keystone_authtoken_middleware conf_file admin_user [section]
 #
 # section defaults to keystone_authtoken, which is where auth_token looks in
 # the .conf file. If the paste config file is used (api-paste.ini) then
 # provide the section name for the auth_token filter.
-function configure_auth_token_middleware {
+function configure_keystone_authtoken_middleware {
     local conf_file=$1
     local admin_user=$2
-    local signing_dir=$3
-    local section=${4:-keystone_authtoken}
+    local section=${3:-keystone_authtoken}
 
     iniset $conf_file $section auth_type password
     iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI
@@ -419,10 +421,16 @@
     iniset $conf_file $section project_domain_name "$SERVICE_DOMAIN_NAME"
 
     iniset $conf_file $section cafile $SSL_BUNDLE_FILE
-    iniset $conf_file $section signing_dir $signing_dir
     iniset $conf_file $section memcached_servers localhost:11211
 }
 
+# configure_auth_token_middleware conf_file admin_user IGNORED [section]
+# TODO(frickler): old function for backwards compatibility, remove in U cycle
+function configure_auth_token_middleware {
+    echo "WARNING: configure_auth_token_middleware is deprecated, use configure_keystone_authtoken_middleware instead"
+    configure_keystone_authtoken_middleware $1 $2 $4
+}
+
 # init_keystone() - Initialize databases, etc.
 function init_keystone {
     if is_service_enabled ldap; then
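
Call sites migrate to the renamed helper by simply dropping the signing_dir
argument; a before/after sketch (the nova values are only an example):

    # old, now deprecated; the third (signing_dir) argument is ignored
    configure_auth_token_middleware $NOVA_CONF nova /var/cache/nova
    # new
    configure_keystone_authtoken_middleware $NOVA_CONF nova
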
diff --git a/lib/lvm b/lib/lvm
index f047181..d9e78a0 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -99,8 +99,15 @@
     if ! sudo vgs $vg; then
         # Only create if the file doesn't already exist
         [[ -f $backing_file ]] || truncate -s $size $backing_file
+
+        local directio=""
+        # Check to see if we can do direct-io
+        if losetup -h | grep -q direct-io; then
+            directio="--direct-io=on"
+        fi
+
         local vg_dev
-        vg_dev=`sudo losetup -f --show $backing_file`
+        vg_dev=$(sudo losetup -f --show $directio $backing_file)
 
         # Only create volume group if it doesn't already exist
         if ! sudo vgs $vg; then
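
Whether the loop device actually picked up direct-io can be checked by hand; a
rough sketch outside the patch:

    # On newer util-linux, the DIO column of `losetup -l` shows whether
    # direct-io is enabled for the backing file.
    sudo losetup -l | grep "$backing_file"
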
diff --git a/lib/neutron b/lib/neutron
index 4847e87..9e6a80c 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -33,10 +33,9 @@
 # - True : Run neutron under uwsgi
 # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
 # enough
-NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False}
+NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
 NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
 NEUTRON_DIR=$DEST/neutron
-NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
 # Distributed Virtual Router (DVR) configuration
@@ -55,6 +54,7 @@
 NEUTRON_CONF_DIR=/etc/neutron
 NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
 NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini
+NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
 
 NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini
 NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini
@@ -62,7 +62,6 @@
 NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True}
 
 NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
-NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
 
@@ -95,14 +94,22 @@
 NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
 NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
 
-# This is needed because _neutron_ovs_base_configure_l3_agent will set
-# external_network_bridge
-Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-True}
 # This is needed because _neutron_ovs_base_configure_l3_agent uses it to create
 # an external network bridge
 PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
 PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
 
+# Network type - defaults to vxlan, but allows vlan-based jobs to override it
+# using the legacy environment variable as well as a new variable that is in
+# greater alignment with the naming scheme of this plugin.
+NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan}
+
+NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}}
+
+# Physical network for VLAN network usage.
+NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-}
+
+
 # Additional neutron api config files
 declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS
 
@@ -120,7 +127,9 @@
 # Test if any Neutron services are enabled
 # is_neutron_enabled
 function is_neutron_legacy_enabled {
-    [[ ,${DISABLED_SERVICES} =~ ,"neutron" ]] && return 1
+    # First we need to remove all "neutron-" prefixes from the DISABLED_SERVICES list
+    disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g')
+    [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1
     [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0
     return 1
 }
@@ -183,9 +192,14 @@
     # Neutron API server & Neutron plugin
     if is_service_enabled neutron-api; then
         local policy_file=$NEUTRON_CONF_DIR/policy.json
-        cp $NEUTRON_DIR/etc/policy.json $policy_file
         # Allow neutron user to administer neutron to match neutron account
-        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $policy_file
+        # NOTE(amotoki): This is required for nova to work correctly with neutron.
+        if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+            cp $NEUTRON_DIR/etc/policy.json $policy_file
+            sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $policy_file
+        else
+            echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $policy_file
+        fi
 
         cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
 
@@ -196,12 +210,11 @@
         iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
 
         iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
-        configure_auth_token_middleware $NEUTRON_CONF neutron $NEUTRON_AUTH_CACHE_DIR keystone_authtoken
-        configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova
+        configure_keystone_authtoken_middleware $NEUTRON_CONF neutron
+        configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
 
-        # Configure VXLAN
-        # TODO(sc68cal) not hardcode?
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types vxlan
+        # Configure tenant network type
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE
 
         local mech_drivers="openvswitch"
         if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
@@ -212,7 +225,10 @@
         iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
 
         iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks public
+        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
+        if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then
+            iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE}
+        fi
         if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
             neutron_ml2_extension_driver_add port_security
         fi
@@ -235,6 +251,7 @@
             if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
                 iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
                 iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True
+                iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True
             fi
         fi
 
@@ -281,14 +298,14 @@
         cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
 
         iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $SERVICE_HOST
+        iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST
         iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
         # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
         configure_root_helper_options $NEUTRON_META_CONF
 
         # TODO(dtroyer): remove the v2.0 hard code below
         iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
-        configure_auth_token_middleware $NEUTRON_META_CONF neutron $NEUTRON_AUTH_CACHE_DIR DEFAULT
+        configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT
     fi
 
     # Format logging
@@ -337,7 +354,6 @@
 # if not passed $NOVA_CONF is used.
 function configure_neutron_nova_new {
     local conf=${1:-$NOVA_CONF}
-    iniset $conf DEFAULT use_neutron True
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
     iniset $conf neutron username neutron
@@ -348,8 +364,6 @@
     iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
     iniset $conf neutron region_name "$REGION_NAME"
 
-    iniset $conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
-
     # optionally set options in nova_conf
     neutron_plugin_create_nova_conf $conf
 
@@ -385,13 +399,6 @@
     fi
 }
 
-# create_neutron_cache_dir() - Part of the init_neutron() process
-function create_neutron_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR
-    rm -f $NEUTRON_AUTH_CACHE_DIR/*
-}
-
 # init_neutron() - Initialize databases, etc.
 function init_neutron_new {
 
@@ -401,8 +408,6 @@
     # Run Neutron db migrations
     $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads
     time_stop "dbsync"
-
-    create_neutron_cache_dir
 }
 
 # install_neutron() - Collect source and prepare
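
A minimal local.conf sketch (illustrative; the physnet name and range are just
assumptions) showing how a vlan-based job could drive the new variables above:

    [[local|localrc]]
    NEUTRON_TENANT_NETWORK_TYPE=vlan
    NEUTRON_PHYSICAL_NETWORK=physnet1
    NEUTRON_TENANT_VLAN_RANGE=100:150
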
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index be5b73f..3d39d41 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -50,8 +50,6 @@
 # See "Neutron Network Configuration" below for additional variables
 # that must be set in localrc for connectivity across hosts with
 # Neutron.
-#
-# With Neutron networking the NETWORK_MANAGER variable is ignored.
 
 # Settings
 # --------
@@ -73,7 +71,6 @@
 
 NEUTRON_DIR=$DEST/neutron
 NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron}
 
 # Support entry points installation of console scripts
 if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
@@ -91,7 +88,7 @@
 # - True : Run neutron under uwsgi
 # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
 # enough
-NEUTRON_DEPLOY_MOD_WSGI=${NEUTRON_DEPLOY_MOD_WSGI:-False}
+NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
 
 NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
 
@@ -376,7 +373,6 @@
 
 function create_nova_conf_neutron {
     local conf=${1:-$NOVA_CONF}
-    iniset $conf DEFAULT use_neutron True
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_AUTH_URI"
     iniset $conf neutron username "$Q_ADMIN_USERNAME"
@@ -387,11 +383,6 @@
     iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
     iniset $conf neutron region_name "$REGION_NAME"
 
-    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-        iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
-    fi
-
     # optionally set options in nova_conf
     neutron_plugin_create_nova_conf $conf
 
@@ -699,10 +690,15 @@
     cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
 
     Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
-    cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
 
     # allow neutron user to administer neutron to match neutron account
-    sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+    # NOTE(amotoki): This is required for nova to work correctly with neutron.
+    if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+        cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
+        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+    else
+        echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
+    fi
 
     # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
     # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
@@ -838,13 +834,13 @@
     iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
 
     iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
-    _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken
+    configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
 
     # Configuration for neutron notifications to nova.
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
     iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
 
-    configure_auth_token_middleware $NEUTRON_CONF nova $NEUTRON_AUTH_CACHE_DIR nova
+    configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
 
     # Configure plugin
     neutron_plugin_configure_service
@@ -930,15 +926,6 @@
     fi
 }
 
-# Configures keystone integration for neutron service
-function _neutron_setup_keystone {
-    local conf_file=$1
-    local section=$2
-
-    create_neutron_cache_dir
-    configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section
-}
-
 function _neutron_setup_interface_driver {
 
     # ovs_use_veth needs to be set before the plugin configuration
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index f2302e3..1f1b0e8 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -8,21 +8,23 @@
 set +o xtrace
 
 function neutron_lb_cleanup {
-    sudo ip link set $PUBLIC_BRIDGE down
-    sudo brctl delbr $PUBLIC_BRIDGE
+    sudo ip link delete $PUBLIC_BRIDGE
 
+    bridge_list=`ls /sys/class/net/*/bridge/bridge_id 2>/dev/null | cut -f5 -d/`
+    if [[ -z "$bridge_list" ]]; then
+        return
+    fi
     if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then
-        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
+        for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do
             sudo ip link delete $port
         done
     elif [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then
-        for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
+        for port in $(echo $bridge_list | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do
             sudo ip link delete $port
         done
     fi
-    for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do
-        sudo ip link set $bridge down
-        sudo brctl delbr $bridge
+    for bridge in $(echo $bridge_list |grep -o -e brq[0-9a-f\-]*); do
+        sudo ip link delete $bridge
     done
 }
 
@@ -46,7 +48,7 @@
 
 function neutron_plugin_configure_l3_agent {
     local conf_file=$1
-    sudo brctl addbr $PUBLIC_BRIDGE
+    sudo ip link add $PUBLIC_BRIDGE type bridge
     set_mtu $PUBLIC_BRIDGE $PUBLIC_BRIDGE_MTU
 }
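
The brctl calls above are replaced by their iproute2/sysfs equivalents, roughly
(sketch only):

    # brctl addbr br-ex      ->  ip link add br-ex type bridge
    # brctl delbr br-ex      ->  ip link delete br-ex
    # brctl show             ->  ls /sys/class/net/*/bridge/bridge_id
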
 
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index c5a4c02..497b6c6 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -44,7 +44,7 @@
 # L3 Plugin to load for ML2
 # For some flat network environments, one may not want to extend the L3 plugin.
 # Make sure it is possible to set ML2_L3_PLUGIN to empty.
-ML2_L3_PLUGIN=${ML2_L3_PLUGIN-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin}
+ML2_L3_PLUGIN=${ML2_L3_PLUGIN-router}
 
 function populate_ml2_config {
     CONF=$1
@@ -147,6 +147,7 @@
         populate_ml2_config /$Q_PLUGIN_CONF_FILE agent l2_population=True
         populate_ml2_config /$Q_PLUGIN_CONF_FILE agent tunnel_types=vxlan
         populate_ml2_config /$Q_PLUGIN_CONF_FILE agent enable_distributed_routing=True
+        populate_ml2_config /$Q_PLUGIN_CONF_FILE agent arp_responder=True
     fi
 }
 
diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage
index f39c7c4..8c75e15 100644
--- a/lib/neutron_plugins/nuage
+++ b/lib/neutron_plugins/nuage
@@ -11,8 +11,6 @@
     local conf="$1"
     NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"}
     iniset $conf neutron ovs_bridge $NOVA_OVS_BRIDGE
-    LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-    iniset $conf DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
 }
 
 function neutron_plugin_install_agent_packages {
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index b65a258..1009611 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -41,8 +41,10 @@
     # Setup physical network bridge mappings.  Override
     # ``OVS_VLAN_RANGES`` and ``OVS_BRIDGE_MAPPINGS`` in ``localrc`` for more
     # complex physical network configurations.
-    if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]] && [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
-        OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+    if [[ "$PHYSICAL_NETWORK" != "" ]] && [[ "$OVS_PHYSICAL_BRIDGE" != "" ]]; then
+        if [[ "$OVS_BRIDGE_MAPPINGS" == "" ]]; then
+            OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE
+        fi
 
         # Configure bridge manually with physical interface as port for multi-node
         _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index 523024e..2e63fe3 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -69,7 +69,7 @@
         restart_service openvswitch
         sudo systemctl enable openvswitch
     elif is_suse; then
-        if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
+        if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
             restart_service openvswitch-switch
         else
             # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
@@ -96,10 +96,6 @@
 }
 
 function _neutron_ovs_base_configure_l3_agent {
-    if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" != "True" ]; then
-        iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge $PUBLIC_BRIDGE
-    fi
-
     neutron-ovs-cleanup --config-file $NEUTRON_CONF
     if [[ "$Q_USE_PUBLIC_VETH" = "True" ]]; then
         ip link show $Q_PUBLIC_VETH_INT > /dev/null 2>&1 ||
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index ec289f6..69536bb 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -395,8 +395,6 @@
 
     # This logic is specific to using the l3-agent for layer 3
     if is_service_enabled q-l3 || is_service_enabled neutron-l3; then
-        # Ensure IPv6 forwarding is enabled on the host
-        sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # if the Linux host considers itself to be a router then it will
         # ignore all router advertisements
         # Ensure IPv6 RAs are accepted on interfaces with a default route.
@@ -409,6 +407,8 @@
             # device name would be reinterpreted as a slash, causing an error.
             sudo sysctl -w net/ipv6/conf/$d/accept_ra=2
         done
+        # Ensure IPv6 forwarding is enabled on the host
+        sudo sysctl -w net.ipv6.conf.all.forwarding=1
         # Configure and enable public bridge
         # Override global IPV6_ROUTER_GW_IP with the true value from neutron
         IPV6_ROUTER_GW_IP=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" port list -c 'Fixed IP Addresses' | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f2 -d\' | tr '\n' ' ')
diff --git a/lib/nova b/lib/nova
index d1d0b3c..7557a51 100644
--- a/lib/nova
+++ b/lib/nova
@@ -46,15 +46,12 @@
 NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova}
 # INSTANCES_PATH is the previous name for this
 NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}}
-NOVA_AUTH_CACHE_DIR=${NOVA_AUTH_CACHE_DIR:-/var/cache/nova}
 
 NOVA_CONF_DIR=/etc/nova
 NOVA_CONF=$NOVA_CONF_DIR/nova.conf
-NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf
 NOVA_COND_CONF=$NOVA_CONF_DIR/nova.conf
 NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
 NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
-NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell}
 NOVA_API_DB=${NOVA_API_DB:-nova_api}
 NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
 NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
@@ -91,9 +88,9 @@
 NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774}
 NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774}
 NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
-NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST}
 NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
+NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True}
 
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
@@ -103,15 +100,14 @@
 # should work in most cases.
 SCHEDULER=${SCHEDULER:-filter_scheduler}
 
-# The following FILTERS contains SameHostFilter and DifferentHostFilter with
+# The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter along with
 # the default filters.
-FILTERS="RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
 
 QEMU_CONF=/etc/libvirt/qemu.conf
 
 # Set default defaults here as some hypervisor drivers override these
 PUBLIC_INTERFACE_DEFAULT=br100
-FLAT_NETWORK_BRIDGE_DEFAULT=br100
 # Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
 # the default isn't completely crazy. This will match ``eth*``, ``em*``, or
 # the new ``p*`` interfaces, then basically picks the first
@@ -137,44 +133,6 @@
     source $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER
 fi
 
-
-# Nova Network Configuration
-# --------------------------
-
-NETWORK_MANAGER=${NETWORK_MANAGER:-${NET_MAN:-FlatDHCPManager}}
-
-VLAN_INTERFACE=${VLAN_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-FLAT_NETWORK_BRIDGE=${FLAT_NETWORK_BRIDGE:-$FLAT_NETWORK_BRIDGE_DEFAULT}
-
-# If you are using the FlatDHCP network mode on multiple hosts, set the
-# ``FLAT_INTERFACE`` variable but make sure that the interface doesn't already
-# have an IP or you risk breaking things.
-#
-# **DHCP Warning**:  If your flat interface device uses DHCP, there will be a
-# hiccup while the network is moved from the flat interface to the flat network
-# bridge.  This will happen when you launch your first instance.  Upon launch
-# you will lose all connectivity to the node, and the VM launch will probably
-# fail.
-#
-# If you are running on a single node and don't need to access the VMs from
-# devices other than that node, you can set ``FLAT_INTERFACE=``
-# This will stop nova from bridging any interfaces into ``FLAT_NETWORK_BRIDGE``.
-FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT}
-
-# ``MULTI_HOST`` is a mode where each compute node runs its own network node.  This
-# allows network operations and routing for a VM to occur on the server that is
-# running the VM - removing a SPOF and bandwidth bottleneck.
-MULTI_HOST=$(trueorfalse False MULTI_HOST)
-
-# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
-# where there are at least two nova-computes.
-NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
-
-# Test floating pool and range are used for testing.  They are defined
-# here until the admin APIs can replace nova-manage
-TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test}
-TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29}
-
 # Other Nova configurations
 # ----------------------------
 
@@ -183,6 +141,23 @@
 # and Glance.
 NOVA_USE_SERVICE_TOKEN=$(trueorfalse False NOVA_USE_SERVICE_TOKEN)
 
+# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack,
+# where there are at least two nova-computes.
+NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST)
+
+# Enable debug logging for the iscsid service (level goes from 0-8)
+ISCSID_DEBUG=$(trueorfalse False ISCSID_DEBUG)
+ISCSID_DEBUG_LEVEL=${ISCSID_DEBUG_LEVEL:-4}
+
+# Format for notifications. Nova defaults to "unversioned" since Train.
+# Other options include "versioned" and "both".
+NOVA_NOTIFICATION_FORMAT=${NOVA_NOTIFICATION_FORMAT:-unversioned}
+
+# Timeout for servers to gracefully shut down the OS during operations
+# like shelve, rescue, stop, rebuild. Defaults to 0 since the default
+# image in devstack is CirrOS.
+NOVA_SHUTDOWN_TIMEOUT=${NOVA_SHUTDOWN_TIMEOUT:-0}
+
 # Functions
 # ---------
 
@@ -194,13 +169,6 @@
     return 1
 }
 
-# Test if any Nova Cell services are enabled
-# is_nova_enabled
-function is_n-cell_enabled {
-    [[ ,${ENABLED_SERVICES} =~ ,"n-cell" ]] && return 0
-    return 1
-}
-
 # is_nova_console_proxy_compute_tls_enabled() - Test if the Nova Console Proxy
 # service has TLS enabled
 function is_nova_console_proxy_compute_tls_enabled {
@@ -251,7 +219,7 @@
         sudo rm -rf $NOVA_INSTANCES_PATH/*
     fi
 
-    sudo rm -rf $NOVA_STATE_PATH $NOVA_AUTH_CACHE_DIR
+    sudo rm -rf $NOVA_STATE_PATH
 
     # NOTE(dtroyer): This really should be called from here but due to the way
     #                nova abuses the _cleanup() function we're moving it
@@ -327,10 +295,32 @@
                 sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH
             fi
         fi
-        if is_suse; then
-            # iscsid is not started by default
-            start_service iscsid
+
+        if is_fedora && [[ $DISTRO =~ f[0-9][0-9] ]]; then
+            # There is an iscsi-initiator bug where it inserts
+            # different whitespace that causes a bunch of output
+            # matching to fail.  We have not been able to get this
+            # fixed yet :/  It exists in Fedora 29 & 30 at least
+            #  https://bugzilla.redhat.com/show_bug.cgi?id=1676365
+            sudo dnf copr enable -y iwienand/iscsi-initiator-utils
+            sudo dnf update -y
         fi
+
+        if [[ ${ISCSID_DEBUG} == "True" ]]; then
+            # Install an override that starts iscsid with debugging
+            # enabled.
+            cat > /tmp/iscsid.override <<EOF
+[Service]
+ExecStart=
+ExecStart=/usr/sbin/iscsid -d${ISCSID_DEBUG_LEVEL}
+EOF
+            sudo mkdir -p /etc/systemd/system/iscsid.service.d
+            sudo mv /tmp/iscsid.override /etc/systemd/system/iscsid.service.d/override.conf
+            sudo systemctl daemon-reload
+        fi
+
+        # ensure that iscsid is started, even when disabled by default
+        restart_service iscsid
     fi
 
     # Rebuild the config file from scratch
@@ -387,7 +377,7 @@
     fi
 
     # S3
-    if is_service_enabled swift3; then
+    if is_service_enabled s3api; then
         get_or_create_service "s3" "s3" "S3"
         get_or_create_endpoint \
             "s3" \
@@ -412,20 +402,20 @@
     iniset $NOVA_CONF wsgi api_paste_config "$NOVA_API_PASTE_INI"
     iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf"
     iniset $NOVA_CONF scheduler driver "$SCHEDULER"
-    iniset $NOVA_CONF filter_scheduler enabled_filters "$FILTERS"
+    iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
     if [[ $SCHEDULER == "filter_scheduler" ]]; then
         iniset $NOVA_CONF scheduler workers "$API_WORKERS"
     fi
-    iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME"
+    iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
     if [[ $SERVICE_IP_VERSION == 6 ]]; then
         iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
-        iniset $NOVA_CONF DEFAULT use_ipv6 "True"
     else
         iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
     fi
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
     iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
+    iniset $NOVA_CONF DEFAULT shutdown_timeout $NOVA_SHUTDOWN_TIMEOUT
 
     iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager
 
@@ -435,8 +425,8 @@
         iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
     fi
 
-    # only setup database connections if there are services that
-    # require them running on the host. The ensures that n-cpu doesn't
+    # only set up database connections and the cache backend if there are services
+    # that require them running on the host. This ensures that n-cpu doesn't
     # leak a need to use the db in a multinode scenario.
     if is_service_enabled n-api n-cond n-sched; then
         # If we're in multi-tier cells mode, we want our control services pointing
@@ -453,6 +443,13 @@
 
         iniset $NOVA_CONF database connection `database_connection_url $db`
         iniset $NOVA_CONF api_database connection `database_connection_url nova_api`
+
+        # Cache related settings
+        # These settings aren't really needed in n-cpu, thus they are configured
+        # only on nodes which run controller services
+        iniset $NOVA_CONF cache enabled $NOVA_ENABLE_CACHE
+        iniset $NOVA_CONF cache backend $CACHE_BACKEND
+        iniset $NOVA_CONF cache memcache_servers $MEMCACHE_SERVERS
     fi
 
     if is_service_enabled n-api; then
@@ -467,15 +464,11 @@
             iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT
         fi
 
-        configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR
+        configure_keystone_authtoken_middleware $NOVA_CONF nova
     fi
 
     if is_service_enabled cinder; then
-        if is_service_enabled tls-proxy; then
-            CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
-            CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
-            iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
-        fi
+        configure_cinder_access
     fi
 
     if [ -n "$NOVA_STATE_PATH" ]; then
@@ -485,10 +478,6 @@
     if [ -n "$NOVA_INSTANCES_PATH" ]; then
         iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH"
     fi
-    if [ "$MULTI_HOST" != "False" ]; then
-        iniset $NOVA_CONF DEFAULT multi_host "True"
-        iniset $NOVA_CONF DEFAULT send_arp_for_ha "True"
-    fi
     if [ "$SYSLOG" != "False" ]; then
         iniset $NOVA_CONF DEFAULT use_syslog "True"
     fi
@@ -519,6 +508,7 @@
     # enable notifications, but it will allow them to function when enabled.
     iniset $NOVA_CONF oslo_messaging_notifications driver "messagingv2"
     iniset $NOVA_CONF oslo_messaging_notifications transport_url $(get_notification_url)
+    iniset $NOVA_CONF notifications notification_format "$NOVA_NOTIFICATION_FORMAT"
     iniset_rpc_backend nova $NOVA_CONF
 
     iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS"
@@ -526,8 +516,6 @@
     # don't let the conductor get out of control now that we're using a pure python db driver
     iniset $NOVA_CONF conductor workers "$API_WORKERS"
 
-    iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
-
     if is_service_enabled tls-proxy; then
         iniset $NOVA_CONF DEFAULT glance_protocol https
         iniset $NOVA_CONF oslo_middleware enable_proxy_headers_parsing True
@@ -535,21 +523,6 @@
 
     iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT"
 
-    # Setup logging for nova-dhcpbridge command line
-    sudo cp "$NOVA_CONF" "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
-
-    if is_service_enabled n-net; then
-        local service="n-dhcp"
-        local logfile="${service}.log.${CURRENT_LOG_TIME}"
-        local real_logfile="${LOGDIR}/${logfile}"
-        if [[ -n ${LOGDIR} ]]; then
-            bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log"
-            iniset "$NOVA_CONF_DIR/nova-dhcpbridge.conf" DEFAULT log_file "$real_logfile"
-        fi
-
-        iniset $NOVA_CONF DEFAULT dhcpbridge_flagfile "$NOVA_CONF_DIR/nova-dhcpbridge.conf"
-    fi
-
     if [ "$NOVA_USE_SERVICE_TOKEN" == "True" ]; then
         init_nova_service_user_conf
     fi
@@ -591,29 +564,94 @@
     else
         for i in $(seq 1 $NOVA_NUM_CELLS); do
             local conf
+            local offset
             conf=$(conductor_conf $i)
-            configure_console_proxies $conf
+            offset=$((i - 1))
+            configure_console_proxies $conf $offset
         done
     fi
 }
 
+# Configure access to placement from a nova service, usually
+# compute, but sometimes conductor.
+function configure_placement_nova_compute {
+    # Use the provided config file path or default to $NOVA_CONF.
+    local conf=${1:-$NOVA_CONF}
+    iniset $conf placement auth_type "password"
+    iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
+    iniset $conf placement username placement
+    iniset $conf placement password "$SERVICE_PASSWORD"
+    iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf placement project_name "$SERVICE_TENANT_NAME"
+    iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf placement region_name "$REGION_NAME"
+}
+
+# Configure access to cinder.
+function configure_cinder_access {
+    iniset $NOVA_CONF cinder os_region_name "$REGION_NAME"
+    iniset $NOVA_CONF cinder auth_type "password"
+    iniset $NOVA_CONF cinder auth_url "$KEYSTONE_SERVICE_URI"
+    # NOTE(mriedem): This looks a bit weird but we use the nova user here
+    # since it has the admin role and the cinder user does not. This is
+    # similar to using the nova user in init_nova_service_user_conf. We need
+    # to use a user with the admin role for background tasks in nova to
+    # be able to GET block-storage API resources owned by another project
+    # since cinder has low-level "is_admin" checks in its DB API.
+    iniset $NOVA_CONF cinder username nova
+    iniset $NOVA_CONF cinder password "$SERVICE_PASSWORD"
+    iniset $NOVA_CONF cinder user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $NOVA_CONF cinder project_name "$SERVICE_TENANT_NAME"
+    iniset $NOVA_CONF cinder project_domain_name "$SERVICE_DOMAIN_NAME"
+    if is_service_enabled tls-proxy; then
+        CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST}
+        CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776}
+        iniset $NOVA_CONF cinder cafile $SSL_BUNDLE_FILE
+    fi
+}
+
 function configure_console_compute {
+    # If we are running multiple cells (and thus multiple console proxies) on a
+    # single host, we offset the ports to avoid collisions.  We need to
+    # correspondingly configure the console proxy port for nova-compute and we
+    # can use the NOVA_CPU_CELL variable to know which cell we are in when
+    # calculating the offset.
+    # Stagger the offset based on the total number of possible console proxies
+    # (novnc, xvpvnc, spice, serial) so that their ports will not collide if
+    # all are enabled.
+    local offset
+    offset=$(((NOVA_CPU_CELL - 1) * 4))
+
+    # Use the host IP instead of the service host because for multi-node, the
+    # service host will be the controller only.
+    local default_proxyclient_addr
+    default_proxyclient_addr=$(iniget $NOVA_CPU_CONF DEFAULT my_ip)
+
     # All nova-compute workers need to know the vnc configuration options
     # These settings don't hurt anything if n-xvnc and n-novnc are disabled
     if is_service_enabled n-cpu; then
-        NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"}
+        if [ "$NOVNC_FROM_PACKAGE" == "True" ]; then
+            # Use the old URL when installing novnc packages.
+            NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_auto.html"}
+        elif vercmp ${NOVNC_BRANCH} "<" "1.0.0"; then
+            # Use the old URL when installing older novnc source.
+            NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_auto.html"}
+        else
+            # Use the new URL when building >=v1.0.0 from source.
+            NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:$((6080 + offset))/vnc_lite.html"}
+        fi
         iniset $NOVA_CPU_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL"
-        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"}
+        XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:$((6081 + offset))/console"}
         iniset $NOVA_CPU_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL"
-        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"}
+        SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:$((6082 + offset))/spice_auto.html"}
         iniset $NOVA_CPU_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL"
     fi
 
     if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         # Address on which instance vncservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
-        VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
-        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
+        VNCSERVER_LISTEN=${VNCSERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS}
+        VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr}
         iniset $NOVA_CPU_CONF vnc server_listen "$VNCSERVER_LISTEN"
         iniset $NOVA_CPU_CONF vnc server_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS"
     else
@@ -623,8 +661,8 @@
     if is_service_enabled n-spice; then
         # Address on which instance spiceservers will listen on compute hosts.
         # For multi-host, this should be the management ip of the compute host.
-        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST}
-        SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST}
+        SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS:-$default_proxyclient_addr}
+        SPICESERVER_LISTEN=${SPICESERVER_LISTEN:-$NOVA_SERVICE_LISTEN_ADDRESS}
         iniset $NOVA_CPU_CONF spice enabled true
         iniset $NOVA_CPU_CONF spice server_listen "$SPICESERVER_LISTEN"
         iniset $NOVA_CPU_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS"
@@ -632,16 +670,24 @@
 
     if is_service_enabled n-sproxy; then
         iniset $NOVA_CPU_CONF serial_console enabled True
+        iniset $NOVA_CPU_CONF serial_console base_url "ws://$SERVICE_HOST:$((6083 + offset))/"
     fi
 }
 
 function configure_console_proxies {
     # Use the provided config file path or default to $NOVA_CONF.
     local conf=${1:-$NOVA_CONF}
+    local offset=${2:-0}
+    # Stagger the offset based on the total number of possible console proxies
+    # (novnc, xvpvnc, spice, serial) so that their ports will not collide if
+    # all are enabled.
+    offset=$((offset * 4))
 
     if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then
         iniset $conf vnc novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf vnc novncproxy_port $((6080 + offset))
         iniset $conf vnc xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf vnc xvpvncproxy_port $((6081 + offset))
 
         if is_nova_console_proxy_compute_tls_enabled ; then
             iniset $conf vnc auth_schemes "vencrypt"
@@ -652,15 +698,33 @@
             sudo mkdir -p /etc/pki/nova-novnc
             deploy_int_CA /etc/pki/nova-novnc/ca-cert.pem
             deploy_int_cert /etc/pki/nova-novnc/client-cert.pem /etc/pki/nova-novnc/client-key.pem
+            # OpenSSL 1.1.0 generates the key file with permissions 600 by
+            # default, and the deploy_int* methods use 'sudo cp' to copy the
+            # files, making them owned by root:root.
+            # Change ownership of everything under /etc/pki/nova-novnc to
+            # $STACK_USER:$(id -g ${STACK_USER}) so that $STACK_USER can read
+            # the key file.
+            sudo chown -R $STACK_USER:$(id -g ${STACK_USER}) /etc/pki/nova-novnc
+            # This is needed to enable TLS in the proxy itself, example log:
+            # WebSocket server settings:
+            #   - Listen on 0.0.0.0:6080
+            #   - Flash security policy server
+            #   - Web server (no directory listings). Web root: /usr/share/novnc
+            #   - SSL/TLS support
+            #   - proxying from 0.0.0.0:6080 to None:None
+            iniset $conf DEFAULT key "/etc/pki/nova-novnc/client-key.pem"
+            iniset $conf DEFAULT cert "/etc/pki/nova-novnc/client-cert.pem"
         fi
     fi
 
     if is_service_enabled n-spice; then
         iniset $conf spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf spice html5proxy_port $((6082 + offset))
     fi
 
     if is_service_enabled n-sproxy; then
         iniset $conf serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS"
+        iniset $conf serial_console serialproxy_port $((6083 + offset))
     fi
 }
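
A worked example of the port staggering above, assuming the default base ports:

    # cell 1: offset = (1 - 1) * 4 = 0  ->  novnc 6080, xvpvnc 6081, spice 6082, serial 6083
    # cell 2: offset = (2 - 1) * 4 = 4  ->  novnc 6084, xvpvnc 6085, spice 6086, serial 6087
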
 
@@ -681,61 +745,6 @@
     echo "${NOVA_CONF_DIR}/nova_cell${cell}.conf"
 }
 
-function init_nova_cells {
-    if is_service_enabled n-cell; then
-        cp $NOVA_CONF $NOVA_CELLS_CONF
-        iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB`
-        rpc_backend_add_vhost child_cell
-        iniset_rpc_backend nova $NOVA_CELLS_CONF DEFAULT child_cell
-        iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF
-        iniset $NOVA_CELLS_CONF cells enable True
-        iniset $NOVA_CELLS_CONF cells cell_type compute
-        iniset $NOVA_CELLS_CONF cells name child
-
-        iniset $NOVA_CONF cells enable True
-        iniset $NOVA_CONF cells cell_type api
-        iniset $NOVA_CONF cells name region
-
-        if is_service_enabled n-api-meta; then
-            NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
-            iniset $NOVA_CONF DEFAULT enabled_apis $NOVA_ENABLED_APIS
-            iniset $NOVA_CELLS_CONF DEFAULT enabled_apis metadata
-        fi
-
-        # Cells v1 conductor should be the nova-cells.conf
-        NOVA_COND_CONF=$NOVA_CELLS_CONF
-
-        time_start "dbsync"
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF db sync
-        time_stop "dbsync"
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell create --name=region --cell_type=parent --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=/ --woffset=0 --wscale=1
-        $NOVA_BIN_DIR/nova-manage cell create --name=child --cell_type=child --username=$RABBIT_USERID --hostname=$RABBIT_HOST --port=5672 --password=$RABBIT_PASSWORD --virtual_host=child_cell --woffset=0 --wscale=1
-
-        # Creates the single cells v2 cell for the child cell (v1) nova db.
-        $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CELLS_CONF cell_v2 create_cell \
-            --transport-url $(get_transport_url child_cell) --name 'cell1'
-    fi
-}
-
-# create_nova_cache_dir() - Part of the init_nova() process
-function create_nova_cache_dir {
-    # Create cache dir
-    sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR
-    rm -f $NOVA_AUTH_CACHE_DIR/*
-}
-
-function create_nova_conf_nova_network {
-    local public_interface=${PUBLIC_INTERFACE:-$PUBLIC_INTERFACE_DEFAULT}
-    iniset $NOVA_CONF DEFAULT network_manager "nova.network.manager.$NETWORK_MANAGER"
-    iniset $NOVA_CONF DEFAULT public_interface "$public_interface"
-    iniset $NOVA_CONF DEFAULT vlan_interface "$VLAN_INTERFACE"
-    iniset $NOVA_CONF DEFAULT flat_network_bridge "$FLAT_NETWORK_BRIDGE"
-    if [ -n "$FLAT_INTERFACE" ]; then
-        iniset $NOVA_CONF DEFAULT flat_interface "$FLAT_INTERFACE"
-    fi
-    iniset $NOVA_CONF DEFAULT use_neutron False
-}
-
 # create_nova_keys_dir() - Part of the init_nova() process
 function create_nova_keys_dir {
     # Create keys dir
@@ -767,10 +776,6 @@
         # Migrate nova and nova_cell0 databases.
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db sync
 
-        if is_service_enabled n-cell; then
-            recreate_database $NOVA_CELLS_DB
-        fi
-
         # Run online migrations on the new databases
         # Needed for flavor conversion
         $NOVA_BIN_DIR/nova-manage --config-file $NOVA_CONF db online_data_migrations
@@ -781,7 +786,6 @@
         done
     fi
 
-    create_nova_cache_dir
     create_nova_keys_dir
 
     if [[ "$NOVA_BACKEND" == "LVM" ]]; then
@@ -875,15 +879,6 @@
     export PATH=$old_path
 }
 
-# Detect and setup conditions under which singleconductor setup is
-# needed. Notably cellsv1.
-function _set_singleconductor {
-    # NOTE(danms): Don't setup conductor fleet for cellsv1
-    if is_service_enabled n-cell; then
-        CELLSV2_SETUP="singleconductor"
-    fi
-}
-
 
 # start_nova_compute() - Start the compute process
 function start_nova_compute {
@@ -891,31 +886,31 @@
     local old_path=$PATH
     export PATH=$NOVA_BIN_DIR:$PATH
 
-    if is_service_enabled n-cell; then
-        local compute_cell_conf=$NOVA_CELLS_CONF
-    else
-        local compute_cell_conf=$NOVA_CONF
-    fi
+    local compute_cell_conf=$NOVA_CONF
+
+    # Bug #1802143: $NOVA_CPU_CONF is constructed by first copying $NOVA_CONF...
+    cp $compute_cell_conf $NOVA_CPU_CONF
+    # ...and then adding/overriding anything explicitly set in $NOVA_CPU_CONF
+    merge_config_file $TOP_DIR/local.conf post-config '$NOVA_CPU_CONF'
 
     if [[ "${CELLSV2_SETUP}" == "singleconductor" ]]; then
         # NOTE(danms): Grenade doesn't setup multi-cell rabbit, so
         # skip these bits and use the normal config.
-        NOVA_CPU_CONF=$compute_cell_conf
         echo "Skipping multi-cell conductor fleet setup"
     else
         # "${CELLSV2_SETUP}" is "superconductor"
-        cp $compute_cell_conf $NOVA_CPU_CONF
         # FIXME(danms): Should this be configurable?
         iniset $NOVA_CPU_CONF workarounds disable_group_policy_check_upcall True
         # Since the nova-compute service cannot reach nova-scheduler over
         # RPC, we also disable track_instance_changes.
         iniset $NOVA_CPU_CONF filter_scheduler track_instance_changes False
         iniset_rpc_backend nova $NOVA_CPU_CONF DEFAULT "nova_cell${NOVA_CPU_CELL}"
-        # Make sure we nuke any database config
-        inidelete $NOVA_CPU_CONF database connection
-        inidelete $NOVA_CPU_CONF api_database connection
     fi
 
+    # Make sure we nuke any database config
+    inidelete $NOVA_CPU_CONF database connection
+    inidelete $NOVA_CPU_CONF api_database connection
+
     # Console proxies were configured earlier in create_nova_conf. Now that the
     # nova-cpu.conf has been created, configure the console settings required
     # by the compute process.
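For illustration, the post-config merge above means a hypothetical local.conf block like the one below would be layered on top of the copied nova.conf when nova-cpu.conf is built (the [libvirt] option shown is only an example):

    [[post-config|$NOVA_CPU_CONF]]
    [libvirt]
    # illustrative override that should reach only the compute process
    cpu_mode = host-passthrough
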
@@ -957,22 +952,7 @@
     export PATH=$NOVA_BIN_DIR:$PATH
 
     local api_cell_conf=$NOVA_CONF
-    if is_service_enabled n-cell; then
-        local compute_cell_conf=$NOVA_CELLS_CONF
-    else
-        local compute_cell_conf=$NOVA_CONF
-    fi
-
-    # ``run_process`` checks ``is_service_enabled``, it is not needed here
-    run_process n-cell-region "$NOVA_BIN_DIR/nova-cells --config-file $api_cell_conf"
-    run_process n-cell-child "$NOVA_BIN_DIR/nova-cells --config-file $compute_cell_conf"
-
-    if is_service_enabled n-net; then
-        if ! running_in_container; then
-            enable_kernel_bridge_firewall
-        fi
-    fi
-    run_process n-net "$NOVA_BIN_DIR/nova-network --config-file $compute_cell_conf"
+    local compute_cell_conf=$NOVA_CONF
 
     run_process n-sch "$NOVA_BIN_DIR/nova-scheduler --config-file $compute_cell_conf"
     if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
@@ -981,9 +961,6 @@
         run_process n-api-meta "$NOVA_BIN_DIR/uwsgi --procname-prefix nova-api-meta --ini $NOVA_METADATA_UWSGI_CONF"
     fi
 
-    # nova-consoleauth always runs globally
-    run_process n-cauth "$NOVA_BIN_DIR/nova-consoleauth --config-file $api_cell_conf"
-
     export PATH=$old_path
 }
 
@@ -1061,11 +1038,7 @@
     # happen between here and the script ending. However, in multinode
     # tests this can very often not be the case. So ensure that the
     # compute is up before we move on.
-    if is_service_enabled n-cell; then
-        # cells v1 can't complete the check below because it munges
-        # hostnames with cell information (grumble grumble).
-        return
-    fi
+
     # TODO(sdague): honestly, this probably should be a plug point for
     # an external system.
     if [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
@@ -1077,8 +1050,6 @@
 }
 
 function start_nova {
-    # this catches the cells v1 case early
-    _set_singleconductor
     start_nova_rest
     start_nova_console_proxies
     start_nova_conductor
@@ -1106,7 +1077,7 @@
 
 function stop_nova_rest {
     # Kill the non-compute nova processes
-    for serv in n-api n-api-meta n-net n-sch n-cauth n-cell n-cell; do
+    for serv in n-api n-api-meta n-sch; do
         stop_process $serv
     done
 }
@@ -1153,19 +1124,19 @@
     if is_service_enabled n-api; then
         if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q ds512M; then
             # Note that danms hates these flavors and apologizes for sdague
-            openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 0 --vcpus 1 cirros256
-            openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 ds512M
-            openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 ds1G
-            openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 ds2G
-            openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 ds4G
+            openstack --os-region-name="$REGION_NAME" flavor create --id c1 --ram 256 --disk 1 --vcpus 1 --property hw_rng:allowed=True cirros256
+            openstack --os-region-name="$REGION_NAME" flavor create --id d1 --ram 512 --disk 5 --vcpus 1 --property hw_rng:allowed=True ds512M
+            openstack --os-region-name="$REGION_NAME" flavor create --id d2 --ram 1024 --disk 10 --vcpus 1 --property hw_rng:allowed=True ds1G
+            openstack --os-region-name="$REGION_NAME" flavor create --id d3 --ram 2048 --disk 10 --vcpus 2 --property hw_rng:allowed=True ds2G
+            openstack --os-region-name="$REGION_NAME" flavor create --id d4 --ram 4096 --disk 20 --vcpus 4 --property hw_rng:allowed=True ds4G
         fi
 
         if ! openstack --os-region-name="$REGION_NAME" flavor list | grep -q m1.tiny; then
-            openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 m1.tiny
-            openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 m1.small
-            openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 m1.medium
-            openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 m1.large
-            openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 m1.xlarge
+            openstack --os-region-name="$REGION_NAME" flavor create --id 1 --ram 512 --disk 1 --vcpus 1 --property hw_rng:allowed=True m1.tiny
+            openstack --os-region-name="$REGION_NAME" flavor create --id 2 --ram 2048 --disk 20 --vcpus 1 --property hw_rng:allowed=True m1.small
+            openstack --os-region-name="$REGION_NAME" flavor create --id 3 --ram 4096 --disk 40 --vcpus 2 --property hw_rng:allowed=True m1.medium
+            openstack --os-region-name="$REGION_NAME" flavor create --id 4 --ram 8192 --disk 80 --vcpus 4 --property hw_rng:allowed=True m1.large
+            openstack --os-region-name="$REGION_NAME" flavor create --id 5 --ram 16384 --disk 160 --vcpus 8 --property hw_rng:allowed=True m1.xlarge
         fi
     fi
 }
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index fcb4777..3566639 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -24,17 +24,10 @@
 # Currently fairly specific to OpenStackCI hosts
 DEBUG_LIBVIRT_COREDUMPS=$(trueorfalse False DEBUG_LIBVIRT_COREDUMPS)
 
-# Only Xenial is left with libvirt-bin.  Everywhere else is libvirtd
-if is_ubuntu && [ ${DISTRO} == "xenial" ]; then
-    LIBVIRT_DAEMON=libvirt-bin
-else
-    LIBVIRT_DAEMON=libvirtd
-fi
-
 # Enable coredumps for libvirt
 #  Bug: https://bugs.launchpad.net/nova/+bug/1643911
 function _enable_coredump {
-    local confdir=/etc/systemd/system/${LIBVIRT_DAEMON}.service.d
+    local confdir=/etc/systemd/system/libvirtd.service.d
     local conffile=${confdir}/coredump.conf
 
     # Create a coredump directory, and instruct the kernel to save to
@@ -61,12 +54,7 @@
 function install_libvirt {
 
     if is_ubuntu; then
-        install_package qemu-system
-        if [[ ${DISTRO} == "xenial" ]]; then
-            install_package libvirt-bin libvirt-dev
-        else
-            install_package libvirt-clients libvirt-daemon-system libvirt-dev
-        fi
+        install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev
         # uninstall in case the libvirt version changed
         pip_uninstall libvirt-python
         pip_install_gr libvirt-python
@@ -150,20 +138,24 @@
     fi
 
     if is_nova_console_proxy_compute_tls_enabled ; then
-        if is_service_enabled n-novnc ; then
-            echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF
-            echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
+        echo "vnc_tls = 1" | sudo tee -a $QEMU_CONF
+        echo "vnc_tls_x509_verify = 1" | sudo tee -a $QEMU_CONF
 
-            sudo mkdir -p /etc/pki/libvirt-vnc
-            sudo chown libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
-            deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
-            deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
-        fi
+        sudo mkdir -p /etc/pki/libvirt-vnc
+        deploy_int_CA /etc/pki/libvirt-vnc/ca-cert.pem
+        deploy_int_cert /etc/pki/libvirt-vnc/server-cert.pem /etc/pki/libvirt-vnc/server-key.pem
+        # OpenSSL 1.1.0 generates the key file with permissions 600 by
+        # default, and the deploy_int* methods use 'sudo cp' to copy the
+        # files, making them owned by root:root.
+        # Change ownership of everything under /etc/pki/libvirt-vnc to
+        # libvirt-qemu:libvirt-qemu so that libvirt-qemu can read the key
+        # file.
+        sudo chown -R libvirt-qemu:libvirt-qemu /etc/pki/libvirt-vnc
     fi
 
     # Service needs to be started on redhat/fedora -- do a restart for
     # sanity after fiddling the config.
-    restart_service $LIBVIRT_DAEMON
+    restart_service libvirtd
 
     # Restart virtlogd companion service to ensure it is running properly
     #  https://bugs.launchpad.net/ubuntu/+source/libvirt/+bug/1577455
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index 49110a8..113e2a7 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -36,11 +36,11 @@
 
 # configure_nova_hypervisor - Set config files, create data dirs, etc
 function configure_nova_hypervisor {
-    configure_libvirt
-    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.firewall.NoopFirewallDriver"}
+    if ! is_ironic_hardware; then
+        configure_libvirt
+    fi
 
     iniset $NOVA_CONF DEFAULT compute_driver ironic.IronicDriver
-    iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER
 
     # ironic section
     iniset $NOVA_CONF ironic auth_type password
@@ -50,9 +50,16 @@
     iniset $NOVA_CONF ironic project_domain_id default
     iniset $NOVA_CONF ironic user_domain_id default
     iniset $NOVA_CONF ironic project_name demo
+    iniset $NOVA_CONF ironic region_name $REGION_NAME
 
+    # These are used with crufty legacy ironicclient
     iniset $NOVA_CONF ironic api_max_retries 300
     iniset $NOVA_CONF ironic api_retry_interval 5
+    # These are used with shiny new openstacksdk
+    iniset $NOVA_CONF ironic connect_retries 300
+    iniset $NOVA_CONF ironic connect_retry_delay 5
+    iniset $NOVA_CONF ironic status_code_retries 300
+    iniset $NOVA_CONF ironic status_code_retry_delay 5
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index 3d676b9..7d3ace8 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -45,8 +45,6 @@
     iniset $NOVA_CONF libvirt live_migration_uri "qemu+ssh://$STACK_USER@%s/system"
     iniset $NOVA_CONF DEFAULT default_ephemeral_format "ext4"
     iniset $NOVA_CONF DEFAULT compute_driver "libvirt.LibvirtDriver"
-    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
-    iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
     # Power architecture currently does not support graphical consoles.
     if is_arch "ppc64"; then
         iniset $NOVA_CONF vnc enabled "false"
diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz
index 58ab5c1..57dc45c 100644
--- a/lib/nova_plugins/hypervisor-openvz
+++ b/lib/nova_plugins/hypervisor-openvz
@@ -38,8 +38,6 @@
 function configure_nova_hypervisor {
     iniset $NOVA_CONF DEFAULT compute_driver "openvz.OpenVzDriver"
     iniset $NOVA_CONF DEFAULT connection_type "openvz"
-    LIBVIRT_FIREWALL_DRIVER=${LIBVIRT_FIREWALL_DRIVER:-"nova.virt.libvirt.firewall.IptablesFirewallDriver"}
-    iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER"
 }
 
 # install_nova_hypervisor() - Install external components
diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver
index 6f79e4f..511ec1b 100644
--- a/lib/nova_plugins/hypervisor-xenserver
+++ b/lib/nova_plugins/hypervisor-xenserver
@@ -24,9 +24,6 @@
 # Defaults
 # --------
 
-# Allow ``build_domU.sh`` to specify the flat network bridge via kernel args
-FLAT_NETWORK_BRIDGE_DEFAULT=$(sed -e 's/.* flat_network_bridge=\([[:alnum:]]*\).*$/\1/g' /proc/cmdline)
-
 VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=169.254.0.1}
 
 
@@ -59,15 +56,11 @@
         die $LINENO "os-xenapi plugin is not specified. Please enable this plugin in local.conf"
     fi
 
-    read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
     iniset $NOVA_CONF DEFAULT compute_driver "xenapi.XenAPIDriver"
     iniset $NOVA_CONF xenserver connection_url "$XENAPI_CONNECTION_URL"
     iniset $NOVA_CONF xenserver connection_username "$XENAPI_USER"
     iniset $NOVA_CONF xenserver connection_password "$XENAPI_PASSWORD"
     iniset $NOVA_CONF DEFAULT flat_injected "False"
-    # Need to avoid crash due to new firewall support
-    XEN_FIREWALL_DRIVER=${XEN_FIREWALL_DRIVER:-"nova.virt.firewall.IptablesFirewallDriver"}
-    iniset $NOVA_CONF DEFAULT firewall_driver "$XEN_FIREWALL_DRIVER"
 
     local dom0_ip
     dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-)
diff --git a/lib/placement b/lib/placement
index a1602ba..785b0dd 100644
--- a/lib/placement
+++ b/lib/placement
@@ -3,9 +3,6 @@
 # lib/placement
 # Functions to control the configuration and operation of the **Placement** service
 #
-# Currently the placement service is embedded in nova. Eventually we
-# expect this to change so this file is started as a separate entity
-# despite making use of some *NOVA* variables and files.
 
 # Dependencies:
 #
@@ -29,23 +26,20 @@
 # Defaults
 # --------
 
-PLACEMENT_CONF_DIR=/etc/nova
-PLACEMENT_CONF=$PLACEMENT_CONF_DIR/nova.conf
-PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-placement}
-# Nova virtual environment
+PLACEMENT_DIR=$DEST/placement
+PLACEMENT_CONF_DIR=/etc/placement
+PLACEMENT_CONF=$PLACEMENT_CONF_DIR/placement.conf
+PLACEMENT_AUTH_STRATEGY=${PLACEMENT_AUTH_STRATEGY:-keystone}
+# Placement virtual environment
 if [[ ${USE_VENV} = True ]]; then
-    PROJECT_VENV["nova"]=${NOVA_DIR}.venv
-    PLACEMENT_BIN_DIR=${PROJECT_VENV["nova"]}/bin
+    PROJECT_VENV["placement"]=${PLACEMENT_DIR}.venv
+    PLACEMENT_BIN_DIR=${PROJECT_VENV["placement"]}/bin
 else
     PLACEMENT_BIN_DIR=$(get_python_exec_prefix)
 fi
-PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/nova-placement-api
+PLACEMENT_UWSGI=$PLACEMENT_BIN_DIR/placement-api
 PLACEMENT_UWSGI_CONF=$PLACEMENT_CONF_DIR/placement-uwsgi.ini
 
-# The placement service can optionally use a separate database
-# connection. Set PLACEMENT_DB_ENABLED to True to use it.
-PLACEMENT_DB_ENABLED=$(trueorfalse False PLACEMENT_DB_ENABLED)
-
 if is_service_enabled tls-proxy; then
     PLACEMENT_SERVICE_PROTOCOL="https"
 fi
@@ -67,7 +61,6 @@
 # cleanup_placement() - Remove residual data files, anything left over from previous
 # runs that a clean run would need to clean up
 function cleanup_placement {
-    sudo rm -f $(apache_site_config_for nova-placement-api)
     sudo rm -f $(apache_site_config_for placement-api)
     remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
 }
@@ -76,21 +69,19 @@
 function _config_placement_apache_wsgi {
     local placement_api_apache_conf
     local venv_path=""
-    local nova_bin_dir=""
-    nova_bin_dir=$(get_python_exec_prefix)
+    local placement_bin_dir=""
+    placement_bin_dir=$(get_python_exec_prefix)
     placement_api_apache_conf=$(apache_site_config_for placement-api)
 
-    # reuse nova's venv if there is one as placement code lives
-    # there
     if [[ ${USE_VENV} = True ]]; then
-        venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages"
-        nova_bin_dir=${PROJECT_VENV["nova"]}/bin
+        venv_path="python-path=${PROJECT_VENV["placement"]}/lib/$(python_version)/site-packages"
+        placement_bin_dir=${PROJECT_VENV["placement"]}/bin
     fi
 
     sudo cp $FILES/apache-placement-api.template $placement_api_apache_conf
     sudo sed -e "
         s|%APACHE_NAME%|$APACHE_NAME|g;
-        s|%PUBLICWSGI%|$nova_bin_dir/nova-placement-api|g;
+        s|%PUBLICWSGI%|$placement_bin_dir/placement-api|g;
         s|%SSLENGINE%|$placement_ssl|g;
         s|%SSLCERTFILE%|$placement_certfile|g;
         s|%SSLKEYFILE%|$placement_keyfile|g;
@@ -100,29 +91,20 @@
     " -i $placement_api_apache_conf
 }
 
-function configure_placement_nova_compute {
-    # Use the provided config file path or default to $NOVA_CONF.
-    local conf=${1:-$NOVA_CONF}
-    iniset $conf placement auth_type "password"
-    iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf placement username placement
-    iniset $conf placement password "$SERVICE_PASSWORD"
-    iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $conf placement project_name "$SERVICE_TENANT_NAME"
-    iniset $conf placement project_domain_name "$SERVICE_DOMAIN_NAME"
-    # TODO(cdent): auth_strategy, which is common to see in these
-    # blocks is not currently used here. For the time being the
-    # placement api uses the auth_strategy configuration setting
-    # established by the nova api. This avoids, for the time, being,
-    # creating redundant configuration items that are just used for
-    # testing.
+# create_placement_conf() - Write config
+function create_placement_conf {
+    rm -f $PLACEMENT_CONF
+    iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
+    iniset $PLACEMENT_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL"
+    iniset $PLACEMENT_CONF api auth_strategy $PLACEMENT_AUTH_STRATEGY
+    configure_keystone_authtoken_middleware $PLACEMENT_CONF placement
+    setup_logging $PLACEMENT_CONF
 }
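Based on the iniset calls above, the generated /etc/placement/placement.conf looks roughly like this (connection string and debug value are illustrative):

    [DEFAULT]
    debug = True

    [api]
    auth_strategy = keystone

    [placement_database]
    connection = mysql+pymysql://root:secret@127.0.0.1/placement?charset=utf8

    [keystone_authtoken]
    # service credentials written by configure_keystone_authtoken_middleware
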
 
 # configure_placement() - Set config files, create data dirs, etc
 function configure_placement {
-    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
-        iniset $PLACEMENT_CONF placement_database connection `database_connection_url placement`
-    fi
+    sudo install -d -o $STACK_USER $PLACEMENT_CONF_DIR
+    create_placement_conf
 
     if [[ "$WSGI_MODE" == "uwsgi" ]]; then
         write_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI" "/placement"
@@ -144,15 +126,9 @@
 }
 
 # init_placement() - Create service user and endpoints
-# If PLACEMENT_DB_ENABLED is true, create the separate placement db
-# using, for now, the api_db migrations.
 function init_placement {
-    if [ "$PLACEMENT_DB_ENABLED" != False ]; then
-        recreate_database placement
-        # Database migration will be handled when nova does an api_db sync
-        # TODO(cdent): When placement is extracted we'll do our own sync
-        # here.
-    fi
+    recreate_database placement
+    $PLACEMENT_BIN_DIR/placement-manage db sync
     create_placement_accounts
 }
 
@@ -160,8 +136,9 @@
 function install_placement {
     install_apache_wsgi
     # Install the openstackclient placement client plugin for CLI
-    # TODO(mriedem): Use pip_install_gr once osc-placement is in g-r.
-    pip_install osc-placement
+    pip_install_gr osc-placement
+    git_clone $PLACEMENT_REPO $PLACEMENT_DIR $PLACEMENT_BRANCH
+    setup_develop $PLACEMENT_DIR
 }
 
 # start_placement_api() - Start the API processes ahead of other things
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 1c7c82f..743b4ae 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -66,7 +66,12 @@
             sudo systemctl restart epmd.socket epmd.service
         fi
         if is_fedora || is_suse; then
-            sudo systemctl enable rabbitmq-server
+            # NOTE(jangutter): If rabbitmq is not running (as in a fresh
+            # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with
+            # socket activation. This fails the first time and does not get
+            # cleared. It is benign, but the workaround is to start rabbitmq a
+            # bit earlier for RPM-based distros.
+            sudo systemctl --now enable rabbitmq-server
         fi
     fi
 }
diff --git a/lib/swift b/lib/swift
index 3b3e608..5be9e35 100644
--- a/lib/swift
+++ b/lib/swift
@@ -47,9 +47,7 @@
     SWIFT_BIN_DIR=$(get_python_exec_prefix)
 fi
 
-SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift}
 SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift}
-SWIFT3_DIR=$DEST/swift3
 
 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080}
@@ -68,8 +66,8 @@
 # Default is ``/etc/swift``.
 SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift}
 
-if is_service_enabled s-proxy && is_service_enabled swift3; then
-    # If we are using ``swift3``, we can default the S3 port to swift instead
+if is_service_enabled s-proxy && is_service_enabled s3api; then
+    # If we are using ``s3api``, we can default the S3 port to swift instead
     # of nova-objectstore
     S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT}
 fi
@@ -423,16 +421,19 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH}
 
     # By default Swift will be installed with Keystone and tempauth middleware
-    # and add the swift3 middleware if its configured for it. The token for
+    # and add the s3api middleware if it's configured for it. The token for
     # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the
     # token for keystoneauth would have the standard reseller_prefix `AUTH_`
-    if is_service_enabled swift3;then
-        swift_pipeline+=" swift3 s3token "
+    if is_service_enabled s3api;then
+        swift_pipeline+=" s3api"
     fi
-
     if is_service_enabled keystone; then
+        if is_service_enabled s3api;then
+            swift_pipeline+=" s3token"
+        fi
         swift_pipeline+=" authtoken keystoneauth"
     fi
+
     swift_pipeline+=" tempauth "
 
     sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER}
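With both s3api and keystone enabled, the sed substitution above yields a proxy-server.conf pipeline ordered roughly as follows (the surrounding middleware depends on the existing pipeline and SWIFT_EXTRAS_MIDDLEWARE):

    [pipeline:main]
    pipeline = <existing middleware> s3api s3token authtoken keystoneauth tempauth <extras> proxy-server
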
@@ -450,7 +451,7 @@
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift
 
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory
-    configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken
+    configure_keystone_authtoken_middleware $SWIFT_CONFIG_PROXY_SERVER swift filter:authtoken
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False
@@ -467,22 +468,6 @@
     # Allow both reseller prefixes to be used with domain_remap
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:domain_remap reseller_prefixes "AUTH, TEMPAUTH"
 
-    if is_service_enabled swift3; then
-        cat <<EOF >>${SWIFT_CONFIG_PROXY_SERVER}
-[filter:s3token]
-paste.filter_factory = keystonemiddleware.s3_token:filter_factory
-auth_uri = ${KEYSTONE_AUTH_URI}
-cafile = ${SSL_BUNDLE_FILE}
-admin_user = swift
-admin_tenant_name = ${SERVICE_PROJECT_NAME}
-admin_password = ${SERVICE_PASSWORD}
-
-[filter:swift3]
-use = egg:swift3#swift3
-location = ${REGION_NAME}
-EOF
-    fi
-
     cp ${SWIFT_DIR}/etc/swift.conf-sample ${SWIFT_CONF_DIR}/swift.conf
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-hash swift_hash_path_suffix ${SWIFT_HASH}
     iniset ${SWIFT_CONF_DIR}/swift.conf swift-constraints max_header_size ${SWIFT_MAX_HEADER_SIZE}
@@ -607,7 +592,7 @@
     # Mount the disk with mount options to make it as efficient as possible
     mkdir -p ${SWIFT_DATA_DIR}/drives/sdb1
     if ! egrep -q ${SWIFT_DATA_DIR}/drives/sdb1 /proc/mounts; then
-        sudo mount -t xfs -o loop,noatime,nodiratime,nobarrier,logbufs=8  \
+        sudo mount -t xfs -o loop,noatime,nodiratime,logbufs=8  \
             ${SWIFT_DISK_IMAGE} ${SWIFT_DATA_DIR}/drives/sdb1
     fi
 
@@ -749,10 +734,6 @@
         $SWIFT_BIN_DIR/swift-ring-builder container.builder rebalance 42
         $SWIFT_BIN_DIR/swift-ring-builder account.builder rebalance 42
     } && popd >/dev/null
-
-    # Create cache dir
-    sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR
-    rm -f $SWIFT_AUTH_CACHE_DIR/*
 }
 
 function install_swift {
diff --git a/lib/tcpdump b/lib/tcpdump
new file mode 100644
index 0000000..16e8269
--- /dev/null
+++ b/lib/tcpdump
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# lib/tcpdump
+# Functions to start and stop a tcpdump
+
+# Dependencies:
+#
+# - ``functions`` file
+
+# ``stack.sh`` calls the entry points in this order:
+#
+# - start_tcpdump
+# - stop_tcpdump
+
+# Save trace setting
+_XTRACE_TCPDUMP=$(set +o | grep xtrace)
+set +o xtrace
+
+TCPDUMP_OUTPUT=${TCPDUMP_OUTPUT:-$LOGDIR/tcpdump.pcap}
+
+# e.g. for iscsi
+#  "-i any tcp port 3260"
+TCPDUMP_ARGS=${TCPDUMP_ARGS:-""}
+
+# start_tcpdump() - Start running processes
+function start_tcpdump {
+    # Run a tcpdump with given arguments and save the packet capture
+    if is_service_enabled tcpdump; then
+        if [[ -z "${TCPDUMP_ARGS}" ]]; then
+            die $LINENO "The tcpdump service requires TCPDUMP_ARGS to be set"
+        fi
+        touch ${TCPDUMP_OUTPUT}
+        run_process tcpdump "/usr/sbin/tcpdump -w $TCPDUMP_OUTPUT $TCPDUMP_ARGS" root root
+    fi
+}
+
+# stop_tcpdump() - Stop the tcpdump process
+function stop_tcpdump {
+    stop_process tcpdump
+}
+
+# Restore xtrace
+$_XTRACE_TCPDUMP
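A minimal local.conf sketch for using this service, reusing the iSCSI filter mentioned in the comment above:

    enable_service tcpdump
    TCPDUMP_ARGS="-i any tcp port 3260"
    # optional, defaults to $LOGDIR/tcpdump.pcap
    TCPDUMP_OUTPUT=/opt/stack/logs/tcpdump.pcap
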
diff --git a/lib/tempest b/lib/tempest
index 00e946e..1066cd4 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -102,6 +102,14 @@
     remove_disabled_services "$extensions_list" "$disabled_exts"
 }
 
+# image_size_in_gib - converts an image size from bytes to GiB, rounded up
+# Takes an image ID parameter as input
+function image_size_in_gib {
+    local size
+    size=$(openstack image show $1 -c size -f value)
+    echo $size | python3 -c "import math; print(int(math.ceil(float(int(input()) / 1024.0 ** 3))))"
+}
+
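A hypothetical usage sketch; a 24 MiB image is about 0.023 GiB, which the helper rounds up to 1:

    image_uuid=$(openstack image show cirros -f value -c id)   # image name is illustrative
    disk=$(image_size_in_gib "$image_uuid")                    # e.g. 25165824 bytes -> 1
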
 # configure_tempest() - Set config files, create data dirs, etc
 function configure_tempest {
     if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -122,9 +130,12 @@
     local available_flavors
     local flavors_ref
     local flavor_lines
+    local flavor_ref_size
+    local flavor_ref_alt_size
     local public_network_id
     local public_router_id
     local ssh_connect_method="floating"
+    local disk
 
     # Save IFS
     ifs=$IFS
@@ -190,11 +201,15 @@
         available_flavors=$(nova flavor-list)
         if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
-                openstack flavor create --id 42 --ram 64 --disk 0 --vcpus 1 m1.nano
+                # Determine the flavor disk size based on the image size.
+                disk=$(image_size_in_gib $image_uuid)
+                openstack flavor create --id 42 --ram 64 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
             fi
             flavor_ref=42
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
-                openstack flavor create --id 84 --ram 128 --disk 0 --vcpus 1 m1.micro
+                # Determine the alt flavor disk size based on the alt image size.
+                disk=$(image_size_in_gib $image_uuid_alt)
+                openstack flavor create --id 84 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
             fi
             flavor_ref_alt=84
         else
@@ -220,11 +235,24 @@
             fi
             flavor_ref=${flavors[0]}
             flavor_ref_alt=$flavor_ref
+            flavor_ref_size=$(openstack flavor show --format value --column disk "${flavor_ref}")
 
             # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values.
             # Some resize instance in tempest tests depends on this.
             for f in ${flavors[@]:1}; do
                 if [[ "$f" != "$flavor_ref" ]]; then
+                    #
+                    # NOTE(sdatko): Resize is only possible when target flavor
+                    #               is not smaller than the original one. For
+                    #               Tempest tests, in case there was a bigger
+                    #               flavor selected as default, e.g. m1.small,
+                    #               we need to perform additional check.
+                    #
+                    flavor_ref_alt_size=$(openstack flavor show --format value --column disk "${f}")
+                    if [[ "${flavor_ref_alt_size}" -lt "${flavor_ref_size}" ]]; then
+                        continue
+                    fi
+
                     flavor_ref_alt=$f
                     break
                 fi
@@ -242,6 +270,9 @@
     # and the public_network_id should not be set.
     if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]] && is_networking_extension_supported 'external-net'; then
         public_network_id=$(openstack network show -f value -c id $PUBLIC_NETWORK_NAME)
+        # make sure the presence of the shared network does not confuse the tempest tests
+        openstack network create --share shared
+        openstack subnet create --description shared-subnet --subnet-range ${TEMPEST_SHARED_POOL:-192.168.233.0/24} --network shared shared-subnet
     fi
 
     iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG
@@ -263,8 +294,6 @@
     iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
     iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION
     iniset $TEMPEST_CONFIG identity user_unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
-    # Use domain scoped tokens for admin v3 tests, v3 dynamic credentials of v3 account generation
-    iniset $TEMPEST_CONFIG identity admin_domain_scope True
     if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then
         iniset $TEMPEST_CONFIG auth admin_username $admin_username
         iniset $TEMPEST_CONFIG auth admin_password "$password"
@@ -328,7 +357,7 @@
     iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref
     iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt
     iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method
-    if ! is_service_enabled n-cell && ! is_service_enabled neutron; then
+    if ! is_service_enabled neutron; then
         iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME
     fi
 
@@ -372,22 +401,9 @@
     iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_back_and_forth ${LIVE_MIGRATE_BACK_AND_FORTH:-False}
     iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True}
-    if is_service_enabled n-cell; then
-        # Cells doesn't support shelving/unshelving
-        iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
-        # Cells doesn't support hot-plugging virtual interfaces.
-        iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False
-        # Cells v1 doesn't support the rescue/unrescue tests in Tempest
-        iniset $TEMPEST_CONFIG compute-feature-enabled rescue False
 
-        if  [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then
-            # Cells supports resize but does not currently work with devstack
-            # because of the custom flavors created for Tempest runs which are
-            # not in the cells database.
-            # TODO(mriedem): work on adding a nova-manage command to sync
-            # flavors into the cells database.
-            iniset $TEMPEST_CONFIG compute-feature-enabled resize False
-        fi
+    if [[ -n "$NOVA_FILTERS" ]]; then
+        iniset $TEMPEST_CONFIG compute-feature-enabled scheduler_enabled_filters ${NOVA_FILTERS}
     fi
 
     if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then
@@ -512,6 +528,24 @@
         iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL"
     fi
 
+    # Placement Features
+    # Set the microversion range for placement.
+    # Setting the range to [None, latest] allows Tempest to run tests for all microversions.
+    # NOTE: To avoid microversion test failures on stable branches, "tempest_placement_max_microversion"
+    #       should be changed on each stable branch from "latest" to the maximum version supported by that release.
+    local tempest_placement_min_microversion=${TEMPEST_PLACEMENT_MIN_MICROVERSION:-None}
+    local tempest_placement_max_microversion=${TEMPEST_PLACEMENT_MAX_MICROVERSION:-"latest"}
+    if [ "$tempest_placement_min_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG placement min_microversion
+    else
+        iniset $TEMPEST_CONFIG placement min_microversion $tempest_placement_min_microversion
+    fi
+    if [ "$tempest_placement_max_microversion" == "None" ]; then
+        inicomment $TEMPEST_CONFIG placement max_microversion
+    else
+        iniset $TEMPEST_CONFIG placement max_microversion $tempest_placement_max_microversion
+    fi
+
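With the defaults above (min None, max "latest"), the resulting tempest.conf section is simply:

    [placement]
    max_microversion = latest
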
     # Baremetal
     if [ "$VIRT_DRIVER" = "ironic" ] ; then
         iniset $TEMPEST_CONFIG compute-feature-enabled change_password False
@@ -535,8 +569,7 @@
             iniset $TEMPEST_CONFIG compute-feature-enabled shelve False
             iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False
             iniset $TEMPEST_CONFIG compute-feature-enabled suspend False
-        elif ! is_service_enabled n-cell; then
-            # cells v1 does not support swapping volumes
+        else
             iniset $TEMPEST_CONFIG compute-feature-enabled swap_volume True
         fi
     fi
@@ -578,16 +611,19 @@
     fi
 
     # The requirements might be on a different branch, while tempest needs master requirements.
-    (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > u-c-m.txt
-    tox -evenv-tempest -- pip install -c u-c-m.txt -r requirements.txt
+    local tmp_u_c_m
+    tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+    (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m
+    tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
+    rm -f $tmp_u_c_m
 
     # Auth:
     iniset $TEMPEST_CONFIG auth tempest_roles "member"
     if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
         if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
-            tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
+            tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
         else
-            tox -evenv-tempest -- tempest-account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-tenant-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
+            tox -evenv-tempest -- tempest account-generator -c $TEMPEST_CONFIG --os-username $admin_username --os-password "$password" --os-project-name $admin_project_name -r $TEMPEST_CONCURRENCY etc/accounts.yaml
         fi
         iniset $TEMPEST_CONFIG auth use_dynamic_credentials False
         iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
@@ -622,6 +658,9 @@
         # Remove disabled extensions
         network_api_extensions=$(remove_disabled_extensions $network_api_extensions $DISABLE_NETWORK_API_EXTENSIONS)
     fi
+    if [[ -n "$ADDITIONAL_NETWORK_API_EXTENSIONS" ]] && [[ "$network_api_extensions" != "all" ]]; then
+        network_api_extensions+=",$ADDITIONAL_NETWORK_API_EXTENSIONS"
+    fi
     iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions
     # Swift API Extensions
     local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"}
@@ -664,7 +703,12 @@
 function install_tempest_plugins {
     pushd $TEMPEST_DIR
     if [[ $TEMPEST_PLUGINS != 0 ]] ; then
-        tox -evenv-tempest -- pip install -c $REQUIREMENTS_DIR/upper-constraints.txt $TEMPEST_PLUGINS
+        # The requirements might be on a different branch, while tempest & tempest plugins need master requirements.
+        local tmp_u_c_m
+        tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
+        (cd $REQUIREMENTS_DIR && git show origin/master:upper-constraints.txt) > $tmp_u_c_m
+        tox -evenv-tempest -- pip install -c $tmp_u_c_m $TEMPEST_PLUGINS
+        rm -f $tmp_u_c_m
         echo "Checking installed Tempest plugins:"
         tox -evenv-tempest -- tempest list-plugins
     fi
diff --git a/lib/tls b/lib/tls
index 217f40e..65ffeb9 100644
--- a/lib/tls
+++ b/lib/tls
@@ -234,6 +234,9 @@
                 # see https://bugs.python.org/issue23239
                 TLS_IP="DNS:$TLS_IP,IP:$TLS_IP"
             fi
+            if [[ -n "$HOST_IPV6" ]]; then
+                TLS_IP="$TLS_IP,IP:$HOST_IPV6"
+            fi
         fi
         make_cert $INT_CA_DIR $DEVSTACK_CERT_NAME $DEVSTACK_HOSTNAME "$TLS_IP"
 
@@ -547,6 +550,9 @@
     LimitRequestFieldSize $f_header_size
     RequestHeader set X-Forwarded-Proto "https"
 
+    # Avoid races (at the cost of performance) to re-use a pooled connection
+    # where the connection is closed (bug 1807518).
+    SetEnv proxy-initial-not-pooled
     <Location />
         ProxyPass http://$b_host:$b_port/ retry=0 nocanon
         ProxyPassReverse http://$b_host:$b_port/
diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh
deleted file mode 100755
index bd44153..0000000
--- a/pkg/elasticsearch.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/bin/bash -xe
-
-# basic reference point for things like filecache
-#
-# TODO(sdague): once we have a few of these I imagine the download
-# step can probably be factored out to something nicer
-TOP_DIR=$(cd $(dirname "$0")/.. && pwd)
-FILES=$TOP_DIR/files
-source $TOP_DIR/stackrc
-
-# Package source and version, all pkg files are expected to have
-# something like this, as well as a way to override them.
-ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5}
-ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch}
-
-# Elastic search actual implementation
-function wget_elasticsearch {
-    local file=${1}
-
-    if [ ! -f ${FILES}/${file} ]; then
-        wget $ELASTICSEARCH_BASEURL/${file} -O ${FILES}/${file}
-    fi
-
-    if [ ! -f ${FILES}/${file}.sha1.txt ]; then
-        wget $ELASTICSEARCH_BASEURL/${file}.sha1.txt -O ${FILES}/${file}.sha1.txt
-    fi
-
-    pushd ${FILES};  sha1sum ${file} > ${file}.sha1.gen;  popd
-
-    if ! diff ${FILES}/${file}.sha1.gen ${FILES}/${file}.sha1.txt; then
-        echo "Invalid elasticsearch download. Could not install."
-        return 1
-    fi
-    return 0
-}
-
-function download_elasticsearch {
-    if is_ubuntu; then
-        wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb
-    elif is_fedora || is_suse; then
-        wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
-    fi
-}
-
-function configure_elasticsearch {
-    # currently a no op
-    :
-}
-
-function _check_elasticsearch_ready {
-    # poll elasticsearch to see if it's started
-    if ! wait_for_service 120 http://localhost:9200; then
-        die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch"
-    fi
-}
-
-function start_elasticsearch {
-    if is_ubuntu; then
-        sudo /etc/init.d/elasticsearch start
-        _check_elasticsearch_ready
-    elif is_fedora; then
-        sudo /bin/systemctl start elasticsearch.service
-        _check_elasticsearch_ready
-    elif is_suse; then
-        sudo /usr/bin/systemctl start elasticsearch.service
-        _check_elasticsearch_ready
-    else
-        echo "Unsupported architecture...can not start elasticsearch."
-    fi
-}
-
-function stop_elasticsearch {
-    if is_ubuntu; then
-        sudo /etc/init.d/elasticsearch stop
-    elif is_fedora; then
-        sudo /bin/systemctl stop elasticsearch.service
-    elif is_suse ; then
-        sudo /usr/bin/systemctl stop elasticsearch.service
-    else
-        echo "Unsupported architecture...can not stop elasticsearch."
-    fi
-}
-
-function install_elasticsearch {
-    pip_install_gr elasticsearch
-    if is_package_installed elasticsearch; then
-        echo "Note: elasticsearch was already installed."
-        return
-    fi
-    if is_ubuntu; then
-        is_package_installed default-jre-headless || install_package default-jre-headless
-
-        sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb
-        sudo update-rc.d elasticsearch defaults 95 10
-    elif is_fedora; then
-        is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless
-        yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
-        sudo /bin/systemctl daemon-reload
-        sudo /bin/systemctl enable elasticsearch.service
-    elif is_suse; then
-        is_package_installed java-1_8_0-openjdk-headless || install_package java-1_8_0-openjdk-headless
-        zypper_install --no-gpg-checks ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm
-        sudo /usr/bin/systemctl daemon-reload
-        sudo /usr/bin/systemctl enable elasticsearch.service
-    else
-        echo "Unsupported install of elasticsearch on this architecture."
-    fi
-}
-
-function uninstall_elasticsearch {
-    if is_package_installed elasticsearch; then
-        if is_ubuntu; then
-            sudo apt-get purge elasticsearch
-        elif is_fedora; then
-            sudo yum remove elasticsearch
-        elif is_suse; then
-            sudo zypper rm elasticsearch
-        else
-            echo "Unsupported install of elasticsearch on this architecture."
-        fi
-    fi
-}
-
-# The PHASE dispatcher. All pkg files are expected to basically cargo
-# cult the case statement.
-PHASE=$1
-echo "Phase is $PHASE"
-
-case $PHASE in
-    download)
-        download_elasticsearch
-        ;;
-    install)
-        install_elasticsearch
-        ;;
-    configure)
-        configure_elasticsearch
-        ;;
-    start)
-        start_elasticsearch
-        ;;
-    stop)
-        stop_elasticsearch
-        ;;
-    uninstall)
-        uninstall_elasticsearch
-        ;;
-esac
diff --git a/playbooks/pre.yaml b/playbooks/pre.yaml
index 4689a63..60f365a 100644
--- a/playbooks/pre.yaml
+++ b/playbooks/pre.yaml
@@ -1,5 +1,12 @@
 - hosts: all
   pre_tasks:
+    - name: Fix the permissions of the zuul home directory
+      # Make sure that the zuul home can be traversed,
+      # so that all users can access the sources placed there.
+      # Some distributions create it with 700 by default.
+      file:
+        path: "{{ ansible_user_dir }}"
+        mode: a+x
     - name: Gather minimum local MTU
       set_fact:
         local_mtu: >
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index de4f8ed..905806d 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,17 @@
           rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
       fi
 
+      # NOTE(kchamart) The 'audit.log' can be useful when QEMU fails to
+      # start due to SELinux denials, e.g. on CentOS and Fedora machines.
+      # For Ubuntu (which runs AppArmor), DevStack
+      # already captures the contents of /var/log/kern.log (via
+      # `journalctl -t kernel` redirected into syslog.txt.gz), which
+      # contains AppArmor-related messages.
+      if [ -f /var/log/audit/audit.log ] ; then
+          sudo cp /var/log/audit/audit.log {{ stage_dir }}/audit.log &&
+          chmod +r {{ stage_dir }}/audit.log;
+      fi
+
       # gzip and save any coredumps in /var/core
       if [ -d /var/core ]; then
           sudo gzip -r /var/core
diff --git a/roles/export-devstack-journal/README.rst b/roles/export-devstack-journal/README.rst
index a34e070..9e3c919 100644
--- a/roles/export-devstack-journal/README.rst
+++ b/roles/export-devstack-journal/README.rst
@@ -1,11 +1,15 @@
 Export journal files from devstack services
 
-Export the systemd journal for every devstack service in native
-journal format as well as text.  Also, export a syslog-style file with
-kernal and sudo messages.
+This role performs a number of log collection tasks:
 
-Writes the output to the ``logs/`` subdirectory of
-``stage_dir``.
+* Export the systemd journal in native format
+* For every devstack service, export logs to text in a file named
+  ``screen-*`` to maintain legacy compatibility from when devstack services
+  used to run in a screen session and were logged separately.
+* Export a syslog-style file with kernel and sudo messages for legacy
+  compatibility.
+
+Writes the output to the ``logs/`` subdirectory of ``stage_dir``.
 
 **Role Variables**
 
diff --git a/roles/export-devstack-journal/tasks/main.yaml b/roles/export-devstack-journal/tasks/main.yaml
index 6e760c1..ef839ed 100644
--- a/roles/export-devstack-journal/tasks/main.yaml
+++ b/roles/export-devstack-journal/tasks/main.yaml
@@ -6,32 +6,49 @@
     state: directory
     owner: "{{ ansible_user }}"
 
-# TODO: convert this to ansible
-- name: Export journal files
+- name: Export legacy stack screen log files
   become: true
   shell:
     cmd: |
       u=""
       name=""
-      for u in `systemctl list-unit-files | grep devstack | awk '{print $1}'`; do
+      for u in $(systemctl list-unit-files | grep devstack | awk '{print $1}'); do
         name=$(echo $u | sed 's/devstack@/screen-/' | sed 's/\.service//')
-        journalctl -o short-precise --unit $u | gzip - > {{ stage_dir }}/logs/$name.txt.gz
+        journalctl -o short-precise --unit $u  > {{ stage_dir }}/logs/$name.txt
       done
 
-      # Export the journal in export format to make it downloadable
-      # for later searching. It can then be rewritten to a journal native
-      # format locally using systemd-journal-remote. This makes a class of
-      # debugging much easier. We don't do the native conversion here as
-      # some distros do not package that tooling.
-      journalctl -u 'devstack@*' -o export | \
-          xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
-
-      # The journal contains everything running under systemd, we'll
-      # build an old school version of the syslog with just the
-      # kernel and sudo messages.
+- name: Export legacy syslog.txt
+  become: true
+  shell:
+    # The journal contains everything running under systemd, we'll
+    # build an old school version of the syslog with just the
+    # kernel and sudo messages.
+    cmd: |
       journalctl \
           -t kernel \
           -t sudo \
           --no-pager \
           --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
-        | gzip - > {{ stage_dir }}/logs/syslog.txt.gz
+         > {{ stage_dir }}/logs/syslog.txt
+
+# TODO: convert this to ansible
+#  - make a list of the above units
+#  - iterate the list here
+- name: Export journal
+  become: true
+  shell:
+    # Export the journal in export format to make it downloadable
+    # for later searching. It can then be rewritten to a journal native
+    # format locally using systemd-journal-remote. This makes a class of
+    # debugging much easier. We don't do the native conversion here as
+    # some distros do not package that tooling.
+    cmd: |
+      journalctl -o export \
+          --since="$(cat {{ devstack_base_dir }}/log-start-timestamp.txt)" \
+        | xz --threads=0 - > {{ stage_dir }}/logs/devstack.journal.xz
+
+- name: Save journal README
+  become: true
+  template:
+    src: devstack.journal.README.txt.j2
+    dest: '{{ stage_dir }}/logs/devstack.journal.README.txt'
diff --git a/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2 b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
new file mode 100644
index 0000000..fe36653
--- /dev/null
+++ b/roles/export-devstack-journal/templates/devstack.journal.README.txt.j2
@@ -0,0 +1,33 @@
+Devstack systemd journal
+========================
+
+The devstack.journal file is a copy of the systemd journal during the
+devstack run.
+
+To use it, you will need to convert it so journalctl can read it
+locally.  After downloading the file:
+
+ $ /lib/systemd/systemd-journal-remote <(xzcat ./devstack.journal.xz) -o output.journal
+
+Note this binary is not in the regular path.  On Debian/Ubuntu
+platforms, you will need to have the "systemd-journal-remote" package
+installed.
+
+It should result in something like:
+
+ Finishing after writing <large number> entries
+
+You can then use journalctl to examine this file.  For example, to see
+all devstack services try:
+
+ $ journalctl --file ./output.journal -u 'devstack@*'
+
+To see just cinder API server logs restrict the match with
+
+ $ journalctl --file ./output.journal -u 'devstack@c-api'
+
+There may be many types of logs available in the journal; a command like
+
+ $ journalctl --file ./output.journal --output=json-pretty | grep "_SYSTEMD_UNIT" | sort -u
+
+can help you find interesting things to filter on.
\ No newline at end of file
diff --git a/roles/fetch-devstack-log-dir/tasks/main.yaml b/roles/fetch-devstack-log-dir/tasks/main.yaml
index 5a198b2..276c4e0 100644
--- a/roles/fetch-devstack-log-dir/tasks/main.yaml
+++ b/roles/fetch-devstack-log-dir/tasks/main.yaml
@@ -1,5 +1,10 @@
+# As the user in the guest may not exist on the executor,
+# we do not preserve the group or owner of the copied logs.
+
 - name: Collect devstack logs
   synchronize:
     dest: "{{ zuul.executor.log_root }}/{{ inventory_hostname }}"
     mode: pull
     src: "{{ devstack_base_dir }}/logs"
+    group: no
+    owner: no
diff --git a/roles/setup-devstack-log-dir/tasks/main.yaml b/roles/setup-devstack-log-dir/tasks/main.yaml
index b9f38df..d8e8cfe 100644
--- a/roles/setup-devstack-log-dir/tasks/main.yaml
+++ b/roles/setup-devstack-log-dir/tasks/main.yaml
@@ -2,4 +2,7 @@
   file:
     path: '{{ devstack_base_dir }}/logs'
     state: directory
+    mode: 0755
+    owner: stack
+    group: stack
   become: yes
diff --git a/roles/setup-devstack-source-dirs/README.rst b/roles/setup-devstack-source-dirs/README.rst
index 4ebf839..0aa048b 100644
--- a/roles/setup-devstack-source-dirs/README.rst
+++ b/roles/setup-devstack-source-dirs/README.rst
@@ -9,3 +9,8 @@
    :default: /opt/stack
 
    The devstack base directory.
+
+.. zuul:rolevar:: devstack_sources_branch
+   :default: None
+
+   The target branch to be set up (where available).
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index e6bbae2..160757e 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -1,9 +1,13 @@
-- name: Find all source repos used by this job
+- name: Find all OpenStack source repos used by this job
   find:
     paths:
-      - src/git.openstack.org/openstack
-      - src/git.openstack.org/openstack-dev
-      - src/git.openstack.org/openstack-infra
+      - src/opendev.org/opendev
+      - src/opendev.org/openstack
+      - src/opendev.org/openstack-dev
+      - src/opendev.org/openstack-infra
+      - src/opendev.org/starlingx
+      - src/opendev.org/x
+      - src/opendev.org/zuul
     file_type: directory
   register: found_repos
 
@@ -12,6 +16,59 @@
   with_items: '{{ found_repos.files }}'
   become: yes
 
+# Github projects are github.com/username/repo (username might be a
+# top-level project too), so we have to do a two-step swizzle to just
+# get the full repo path (ansible's find module doesn't help with this
+# :/)
+- name: Find top level github projects
+  find:
+    paths:
+      - src/github.com
+    file_type: directory
+  register: found_github_projects
+
+- name: Find actual github repos
+  find:
+    paths: '{{ found_github_projects.files | map(attribute="path") | list }}'
+    file_type: directory
+  register: found_github_repos
+  when: found_github_projects.files
+
+- name: Copy github repos into devstack working directory
+  command: rsync -a {{ item.path }} {{ devstack_base_dir }}
+  with_items: '{{ found_github_repos.files }}'
+  become: yes
+  when: found_github_projects.files
+
+- name: Setup refspec for repos into devstack working directory
+  shell:
+    # Copied almost "as-is" from devstack-gate setup-workspace function
+    # but removing the dependency on functions.sh
+    # TODO this should be rewritten as a python module.
+    cmd: |
+      cd {{ devstack_base_dir }}/{{ item.path | basename }}
+      base_branch={{ devstack_sources_branch }}
+      if git branch -a | grep "$base_branch" > /dev/null ; then
+          git checkout $base_branch
+      elif [[ "$base_branch" == stable/* ]]; then
+          # Look for an eol tag for the stable branch.
+          eol_tag=${base_branch#stable/}-eol
+          if git tag -l | grep $eol_tag >/dev/null; then
+              git checkout $eol_tag
+              git reset --hard $eol_tag
+              if ! git clean -x -f -d -q ; then
+                  sleep 1
+                  git clean -x -f -d -q
+              fi
+          fi
+      else
+          git checkout master
+      fi
+  args:
+    executable: /bin/bash
+  with_items: '{{ found_repos.files }}'
+  when: devstack_sources_branch is defined
+
 - name: Set ownership of repos
   file:
     path: '{{ devstack_base_dir }}'
diff --git a/roles/sync-devstack-data/README.rst b/roles/sync-devstack-data/README.rst
index 500e8cc..388625c 100644
--- a/roles/sync-devstack-data/README.rst
+++ b/roles/sync-devstack-data/README.rst
@@ -10,3 +10,10 @@
    :default: /opt/stack
 
    The devstack base directory.
+
+.. zuul:rolevar:: devstack_data_base_dir
+   :default: {{ devstack_base_dir }}
+
+   The devstack base directory for data/.
+   Useful when multiple executions of devstack (for example, grenade)
+   share the same data directory.
diff --git a/roles/sync-devstack-data/defaults/main.yaml b/roles/sync-devstack-data/defaults/main.yaml
index fea05c8..6b5017b 100644
--- a/roles/sync-devstack-data/defaults/main.yaml
+++ b/roles/sync-devstack-data/defaults/main.yaml
@@ -1 +1,2 @@
 devstack_base_dir: /opt/stack
+devstack_data_base_dir: "{{ devstack_base_dir }}"
diff --git a/roles/sync-devstack-data/tasks/main.yaml b/roles/sync-devstack-data/tasks/main.yaml
index 4600015..a1d37c3 100644
--- a/roles/sync-devstack-data/tasks/main.yaml
+++ b/roles/sync-devstack-data/tasks/main.yaml
@@ -1,7 +1,7 @@
 - name: Ensure the data folder exists
   become: true
   file:
-    path: "{{ devstack_base_dir }}/data"
+    path: "{{ devstack_data_base_dir }}/data"
     state: directory
     owner: stack
     group: stack
@@ -11,7 +11,7 @@
 - name: Ensure the CA folder exists
   become: true
   file:
-    path: "{{ devstack_base_dir }}/data/CA"
+    path: "{{ devstack_data_base_dir }}/data/CA"
     state: directory
     owner: stack
     group: stack
@@ -25,8 +25,8 @@
     dest: "{{ zuul.executor.work_root }}/{{ item | basename }}"
     mode: pull
   with_items:
-    - "{{ devstack_base_dir }}/data/ca-bundle.pem"
-    - "{{ devstack_base_dir }}/data/CA"
+    - "{{ devstack_data_base_dir }}/data/ca-bundle.pem"
+    - "{{ devstack_data_base_dir }}/data/CA"
   when: inventory_hostname == 'controller'
 
 - name: Push the CA certificate
@@ -34,7 +34,7 @@
   become_user: stack
   synchronize:
     src: "{{ zuul.executor.work_root }}/ca-bundle.pem"
-    dest: "{{ devstack_base_dir }}/data/ca-bundle.pem"
+    dest: "{{ devstack_data_base_dir }}/data/ca-bundle.pem"
     mode: push
   when: 'inventory_hostname in groups["subnode"]|default([])'
 
@@ -43,6 +43,17 @@
   become_user: stack
   synchronize:
     src: "{{ zuul.executor.work_root }}/CA/"
-    dest: "{{ devstack_base_dir }}/data/"
+    dest: "{{ devstack_data_base_dir }}/data/"
     mode: push
   when: 'inventory_hostname in groups["subnode"]|default([])'
+
+- name: Ensure the data folder and subfolders have the correct permissions
+  become: true
+  file:
+    path: "{{ devstack_data_base_dir }}/data"
+    state: directory
+    owner: stack
+    group: stack
+    mode: 0755
+    recurse: yes
+  when: 'inventory_hostname in groups["subnode"]|default([])'
diff --git a/roles/write-devstack-local-conf/README.rst b/roles/write-devstack-local-conf/README.rst
index e9739cd..d0a51e7 100644
--- a/roles/write-devstack-local-conf/README.rst
+++ b/roles/write-devstack-local-conf/README.rst
@@ -88,3 +88,12 @@
    If a plugin declares a dependency on another plugin (via
    ``plugin_requires`` in the plugin's settings file), this role will
    automatically emit ``enable_plugin`` lines in the correct order.
+
+.. zuul:rolevar:: tempest_plugins
+   :type: list
+
+   A list of tempest plugins which are installed alongside tempest.
+
+   The list of values will be combined with the base devstack directory
+   and used to populate the ``TEMPEST_PLUGINS`` variable. If the variable
+   already exists, its value is *not* changed.
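
As an illustration (the job name is hypothetical), a job could request plugins
like this::

    - job:
        name: example-tempest-plugin-job
        vars:
          tempest_plugins:
            - heat-tempest-plugin
            - sahara-tests

With the default base directory this would emit roughly
TEMPEST_PLUGINS="/opt/stack/heat-tempest-plugin /opt/stack/sahara-tests" in the
generated localrc, assuming those repositories are checked out under the base
directory (for example via the job's required-projects).
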
diff --git a/roles/write-devstack-local-conf/library/devstack_local_conf.py b/roles/write-devstack-local-conf/library/devstack_local_conf.py
index bba7e31..2f97d0e 100644
--- a/roles/write-devstack-local-conf/library/devstack_local_conf.py
+++ b/roles/write-devstack-local-conf/library/devstack_local_conf.py
@@ -155,8 +155,8 @@
                 continue
             self.loadDevstackPluginInfo(settings)
 
-    define_re = re.compile(r'^define_plugin\s+(\w+).*')
-    require_re = re.compile(r'^plugin_requires\s+(\w+)\s+(\w+).*')
+    define_re = re.compile(r'^define_plugin\s+(\S+).*')
+    require_re = re.compile(r'^plugin_requires\s+(\S+)\s+(\S+).*')
     def loadDevstackPluginInfo(self, fn):
         name = None
         reqs = set()
@@ -207,18 +207,23 @@
 class LocalConf(object):
 
     def __init__(self, localrc, localconf, base_services, services, plugins,
-                 base_dir, projects, project):
+                 base_dir, projects, project, tempest_plugins):
         self.localrc = []
+        self.warnings = []
         self.meta_sections = {}
         self.plugin_deps = {}
         self.base_dir = base_dir
         self.projects = projects
         self.project = project
-        if plugins:
-            self.handle_plugins(plugins)
+        self.tempest_plugins = tempest_plugins
         if services or base_services:
             self.handle_services(base_services, services or {})
         self.handle_localrc(localrc)
+        # Plugins must be the last items in localrc, otherwise
+        # the configuration lines which follow them in the file are
+        # not applied to the plugins (for example, the value of DEST).
+        if plugins:
+            self.handle_plugins(plugins)
         if localconf:
             self.handle_localconf(localconf)
 
@@ -243,12 +248,19 @@
 
     def handle_localrc(self, localrc):
         lfg = False
+        tp = False
         if localrc:
             vg = VarGraph(localrc)
             for k, v in vg.getVars():
-                self.localrc.append('{}={}'.format(k, v))
+                # Avoid double quoting values which are already quoted
+                if len(v) and v[0] == '"':
+                    self.localrc.append('{}={}'.format(k, v))
+                else:
+                    self.localrc.append('{}="{}"'.format(k, v))
                 if k == 'LIBS_FROM_GIT':
                     lfg = True
+                elif k == 'TEMPEST_PLUGINS':
+                    tp = True
 
         if not lfg and (self.projects or self.project):
             required_projects = []
@@ -263,6 +275,19 @@
                 self.localrc.append('LIBS_FROM_GIT={}'.format(
                     ','.join(required_projects)))
 
+        if self.tempest_plugins:
+            if not tp:
+                tp_dirs = []
+                for tempest_plugin in self.tempest_plugins:
+                    tp_dirs.append(os.path.join(self.base_dir, tempest_plugin))
+                self.localrc.append('TEMPEST_PLUGINS="{}"'.format(
+                        ' '.join(tp_dirs)))
+            else:
+                self.warnings.append('TEMPEST_PLUGINS already defined ({}), '
+                                     'requested value {} ignored'.format(
+                                         tp, self.tempest_plugins))
+
+
     def handle_localconf(self, localconf):
         for phase, phase_data in localconf.items():
             for fn, fn_data in phase_data.items():
@@ -297,6 +322,7 @@
             path=dict(type='str'),
             projects=dict(type='dict'),
             project=dict(type='dict'),
+            tempest_plugins=dict(type='list'),
         )
     )
 
@@ -308,10 +334,11 @@
                    p.get('plugins'),
                    p.get('base_dir'),
                    p.get('projects'),
-                   p.get('project'))
+                   p.get('project'),
+                   p.get('tempest_plugins'))
     lc.write(p['path'])
 
-    module.exit_json()
+    module.exit_json(warnings=lc.warnings)
 
 
 try:
diff --git a/roles/write-devstack-local-conf/library/test.py b/roles/write-devstack-local-conf/library/test.py
index 791552d..7c526b3 100644
--- a/roles/write-devstack-local-conf/library/test.py
+++ b/roles/write-devstack-local-conf/library/test.py
@@ -23,6 +23,20 @@
 from collections import OrderedDict
 
 class TestDevstackLocalConf(unittest.TestCase):
+
+    @staticmethod
+    def _init_localconf(p):
+        lc = LocalConf(p.get('localrc'),
+                       p.get('local_conf'),
+                       p.get('base_services'),
+                       p.get('services'),
+                       p.get('plugins'),
+                       p.get('base_dir'),
+                       p.get('projects'),
+                       p.get('project'),
+                       p.get('tempest_plugins'))
+        return lc
+
     def setUp(self):
         self.tmpdir = tempfile.mkdtemp()
 
@@ -40,9 +54,9 @@
         # We use ordereddict here to make sure the plugins are in the
         # *wrong* order for testing.
         plugins = OrderedDict([
-            ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
-            ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
-            ('baz', 'git://git.openstack.org/openstack/baz-plugin'),
+            ('bar', 'https://git.openstack.org/openstack/bar-plugin'),
+            ('foo', 'https://git.openstack.org/openstack/foo-plugin'),
+            ('baz', 'https://git.openstack.org/openstack/baz-plugin'),
             ])
         p = dict(localrc=localrc,
                  local_conf=local_conf,
@@ -51,14 +65,7 @@
                  plugins=plugins,
                  base_dir='./test',
                  path=os.path.join(self.tmpdir, 'test.local.conf'))
-        lc = LocalConf(p.get('localrc'),
-                       p.get('local_conf'),
-                       p.get('base_services'),
-                       p.get('services'),
-                       p.get('plugins'),
-                       p.get('base_dir'),
-                       p.get('projects'),
-                       p.get('project'))
+        lc = self._init_localconf(p)
         lc.write(p['path'])
 
         plugins = []
@@ -78,12 +85,12 @@
         with open(os.path.join(
                 self.tmpdir,
                 'foo-plugin', 'devstack', 'settings'), 'w') as f:
-            f.write('define_plugin foo\n')
+            f.write('define_plugin foo-plugin\n')
         with open(os.path.join(
                 self.tmpdir,
                 'bar-plugin', 'devstack', 'settings'), 'w') as f:
-            f.write('define_plugin bar\n')
-            f.write('plugin_requires bar foo\n')
+            f.write('define_plugin bar-plugin\n')
+            f.write('plugin_requires bar-plugin foo-plugin\n')
 
         localrc = {'test_localrc': '1'}
         local_conf = {'install':
@@ -94,8 +101,8 @@
         # We use ordereddict here to make sure the plugins are in the
         # *wrong* order for testing.
         plugins = OrderedDict([
-            ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
-            ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+            ('bar-plugin', 'https://git.openstack.org/openstack/bar-plugin'),
+            ('foo-plugin', 'https://git.openstack.org/openstack/foo-plugin'),
             ])
         p = dict(localrc=localrc,
                  local_conf=local_conf,
@@ -104,6 +111,15 @@
                  plugins=plugins,
                  base_dir=self.tmpdir,
                  path=os.path.join(self.tmpdir, 'test.local.conf'))
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        plugins = []
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('enable_plugin'):
+                    plugins.append(line.split()[1])
+        self.assertEqual(['foo-plugin', 'bar-plugin'], plugins)
 
     def test_libs_from_git(self):
         "Test that LIBS_FROM_GIT is auto-generated"
@@ -129,14 +145,7 @@
                  path=os.path.join(self.tmpdir, 'test.local.conf'),
                  projects=projects,
                  project=project)
-        lc = LocalConf(p.get('localrc'),
-                       p.get('local_conf'),
-                       p.get('base_services'),
-                       p.get('services'),
-                       p.get('plugins'),
-                       p.get('base_dir'),
-                       p.get('projects'),
-                       p.get('project'))
+        lc = self._init_localconf(p)
         lc.write(p['path'])
 
         lfg = None
@@ -168,14 +177,7 @@
                  base_dir='./test',
                  path=os.path.join(self.tmpdir, 'test.local.conf'),
                  projects=projects)
-        lc = LocalConf(p.get('localrc'),
-                       p.get('local_conf'),
-                       p.get('base_services'),
-                       p.get('services'),
-                       p.get('plugins'),
-                       p.get('base_dir'),
-                       p.get('projects'),
-                       p.get('project'))
+        lc = self._init_localconf(p)
         lc.write(p['path'])
 
         lfg = None
@@ -183,7 +185,25 @@
             for line in f:
                 if line.startswith('LIBS_FROM_GIT'):
                     lfg = line.strip().split('=')[1]
-        self.assertEqual('oslo.db', lfg)
+        self.assertEqual('"oslo.db"', lfg)
+
+    def test_avoid_double_quote(self):
+        "Test that there a no duplicated quotes"
+        localrc = {'TESTVAR': '"quoted value"'}
+        p = dict(localrc=localrc,
+                 base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 projects={})
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        testvar = None
+        with open(p['path']) as f:
+            for line in f:
+                if line.startswith('TESTVAR'):
+                    testvar = line.strip().split('=')[1]
+        self.assertEqual('"quoted value"', testvar)
 
     def test_plugin_circular_deps(self):
         "Test that plugins with circular dependencies fail"
@@ -211,8 +231,8 @@
         # We use ordereddict here to make sure the plugins are in the
         # *wrong* order for testing.
         plugins = OrderedDict([
-            ('bar', 'git://git.openstack.org/openstack/bar-plugin'),
-            ('foo', 'git://git.openstack.org/openstack/foo-plugin'),
+            ('bar', 'https://git.openstack.org/openstack/bar-plugin'),
+            ('foo', 'https://git.openstack.org/openstack/foo-plugin'),
             ])
         p = dict(localrc=localrc,
                  local_conf=local_conf,
@@ -222,14 +242,50 @@
                  base_dir=self.tmpdir,
                  path=os.path.join(self.tmpdir, 'test.local.conf'))
         with self.assertRaises(Exception):
-            lc = LocalConf(p.get('localrc'),
-                           p.get('local_conf'),
-                           p.get('base_services'),
-                           p.get('services'),
-                           p.get('plugins'),
-                           p.get('base_dir'))
+            lc = self._init_localconf(p)
             lc.write(p['path'])
 
+    def _find_tempest_plugins_value(self, file_path):
+        tp = None
+        with open(file_path) as f:
+            for line in f:
+                if line.startswith('TEMPEST_PLUGINS'):
+                    found = line.strip().split('=')[1]
+                    self.assertIsNone(tp,
+                        "TEMPEST_PLUGIN ({}) found again ({})".format(
+                            tp, found))
+                    tp = found
+        return tp
+
+    def test_tempest_plugins(self):
+        "Test that TEMPEST_PLUGINS is correctly populated."
+        p = dict(base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 tempest_plugins=['heat-tempest-plugin', 'sahara-tests'])
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        tp = self._find_tempest_plugins_value(p['path'])
+        self.assertEqual('"./test/heat-tempest-plugin ./test/sahara-tests"', tp)
+        self.assertEqual(len(lc.warnings), 0)
+
+    def test_tempest_plugins_not_overridden(self):
+        """Test that the existing value of TEMPEST_PLUGINS is not overridden
+        by the user-provided value, but a warning is emitted."""
+        localrc = {'TEMPEST_PLUGINS': 'someplugin'}
+        p = dict(localrc=localrc,
+                 base_services=[],
+                 base_dir='./test',
+                 path=os.path.join(self.tmpdir, 'test.local.conf'),
+                 tempest_plugins=['heat-tempest-plugin', 'sahara-tests'])
+        lc = self._init_localconf(p)
+        lc.write(p['path'])
+
+        tp = self._find_tempest_plugins_value(p['path'])
+        self.assertEqual('"someplugin"', tp)
+        self.assertEqual(len(lc.warnings), 1)
+
 
 if __name__ == '__main__':
     unittest.main()
diff --git a/roles/write-devstack-local-conf/tasks/main.yaml b/roles/write-devstack-local-conf/tasks/main.yaml
index 9a6b083..bfd0860 100644
--- a/roles/write-devstack-local-conf/tasks/main.yaml
+++ b/roles/write-devstack-local-conf/tasks/main.yaml
@@ -10,4 +10,5 @@
     local_conf: "{{ devstack_local_conf|default(omit) }}"
     base_dir: "{{ devstack_base_dir|default(omit) }}"
     projects: "{{ zuul.projects }}"
-    project: "{{ zuul.project }}"
\ No newline at end of file
+    project: "{{ zuul.project }}"
+    tempest_plugins: "{{ tempest_plugins|default(omit) }}"
diff --git a/samples/local.sh b/samples/local.sh
index 9cd0bdc..a1c5c81 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -41,6 +41,13 @@
         fi
     done
 
+    # Update default security group
+    # -----------------------------
+
+    # Add tcp/22 and icmp to default security group
+    default=$(openstack security group list -f value -c ID)
+    openstack security group rule create $default --protocol tcp --dst-port 22
+    openstack security group rule create $default --protocol icmp
 
     # Create A Flavor
     # ---------------
@@ -57,12 +64,4 @@
         openstack flavor create $MI_NAME --id 6 --ram 128 --disk 0 --vcpus 1
     fi
 
-
-    # Other Uses
-    # ----------
-
-    # Add tcp/22 and icmp to default security group
-    openstack security group rule create --project $OS_PROJECT_NAME default --protocol tcp --ingress --dst-port 22
-    openstack security group rule create --project $OS_PROJECT_NAME default --protocol icmp
-
 fi
diff --git a/setup.cfg b/setup.cfg
index fcd2b13..4e27ad8 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,21 +4,12 @@
 description-file =
     README.rst
 author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
 home-page = https://docs.openstack.org/devstack/latest
 classifier =
     Intended Audience :: Developers
     License :: OSI Approved :: Apache Software License
     Operating System :: POSIX :: Linux
 
-[build_sphinx]
-all_files = 1
-build-dir = doc/build
-source-dir = doc/source
-warning-is-error = 1
-
-[pbr]
-warnerrors = True
-
 [wheel]
 universal = 1
diff --git a/stack.sh b/stack.sh
index be3c4be..089510f 100755
--- a/stack.sh
+++ b/stack.sh
@@ -12,7 +12,7 @@
 # a multi-node developer install.
 
 # To keep this script simple we assume you are running on a recent **Ubuntu**
-# (16.04 Xenial or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
+# (18.04 Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
 # (7 or newer) machine. (It may work on other platforms but support for those
 # platforms is left to those who added them to DevStack.) It should work in
 # a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -60,6 +60,9 @@
 LC_ALL=en_US.utf8
 export LC_ALL
 
+# Clear all OpenStack related envvars
+unset `env | grep -E '^OS_' | cut -d = -f 1`
+
 # Make sure umask is sane
 umask 022
 
@@ -164,9 +167,6 @@
 # Import common functions
 source $TOP_DIR/functions
 
-# Import config functions
-source $TOP_DIR/inc/meta-config
-
 # Import 'public' stack.sh functions
 source $TOP_DIR/lib/stack
 
@@ -221,7 +221,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-if [[ ! ${DISTRO} =~ (xenial|artful|bionic|stretch|jessie|f27|f28|opensuse-42.3|opensuse-15.0|opensuse-tumbleweed|rhel7) ]]; then
+if [[ ! ${DISTRO} =~ (bionic|stretch|jessie|f29|opensuse-15.0|opensuse-15.1|opensuse-tumbleweed|rhel7) ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
     if [[ "$FORCE" != "yes" ]]; then
         die $LINENO "If you wish to run this script anyway run with FORCE=yes"
@@ -244,7 +244,7 @@
 # --------------
 
 # We're not as **root** so make sure ``sudo`` is available
-is_package_installed sudo || install_package sudo
+is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo
 
 # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one
 sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers ||
@@ -328,13 +328,26 @@
     # Per the point above, it's a bunch of repos so starts getting a
     # little messy...
     if ! is_package_installed rdo-release ; then
-        yum_install https://rdoproject.org/repos/rdo-release.rpm
+        if [[ "$TARGET_BRANCH" == "master" ]]; then
+            yum_install https://rdoproject.org/repos/rdo-release.rpm
+        else
+            # Get latest rdo-release-$rdo_release RPM package version
+            rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
+            yum_install https://rdoproject.org/repos/openstack-$rdo_release/rdo-release-$rdo_release.rpm
+        fi
     fi
 
     # Also enable optional for RHEL7 proper.  Note this is a silent
     # no-op on other platforms.
     sudo yum-config-manager --enable rhel-7-server-optional-rpms
 
+    # Enable the Software Collections (SCL) repository for CentOS.
+    # This repository includes useful software (e.g. the Go Toolset)
+    # which is not present in the main repository.
+    if [[ "$os_VENDOR" =~ (CentOS) ]]; then
+        yum_install centos-release-scl
+    fi
+
     if is_oraclelinux; then
         sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56
     fi
@@ -349,9 +362,12 @@
 
 # Create the destination directory and ensure it is writable by the user
 # and read/executable by everybody for daemons (e.g. apache run for horizon)
-sudo mkdir -p $DEST
-safe_chown -R $STACK_USER $DEST
-safe_chmod 0755 $DEST
+# If the directory already exists, do not modify its permissions.
+if [[ ! -d $DEST ]]; then
+    sudo mkdir -p $DEST
+    safe_chown -R $STACK_USER $DEST
+    safe_chmod 0755 $DEST
+fi
 
 # Destination path for devstack logs
 if [[ -n ${LOGDIR:-} ]]; then
@@ -360,9 +376,11 @@
 
 # Destination path for service data
 DATA_DIR=${DATA_DIR:-${DEST}/data}
-sudo mkdir -p $DATA_DIR
-safe_chown -R $STACK_USER $DATA_DIR
-safe_chmod 0755 $DATA_DIR
+if [[ ! -d $DATA_DIR ]]; then
+    sudo mkdir -p $DATA_DIR
+    safe_chown -R $STACK_USER $DATA_DIR
+    safe_chmod 0755 $DATA_DIR
+fi
 
 # Configure proper hostname
 # Certain services such as rabbitmq require that the local hostname resolves
@@ -607,6 +625,7 @@
 source $TOP_DIR/lib/neutron
 source $TOP_DIR/lib/ldap
 source $TOP_DIR/lib/dstat
+source $TOP_DIR/lib/tcpdump
 source $TOP_DIR/lib/etcd3
 
 # Extras Source
@@ -684,7 +703,14 @@
 # The available database backends are listed in ``DATABASE_BACKENDS`` after
 # ``lib/database`` is sourced. ``mysql`` is the default.
 
-initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || echo "No database enabled"
+if initialize_database_backends; then
+    echo "Using $DATABASE_TYPE database backend"
+    # Last chance for the database password. This must be handled here
+    # because read_password is not a library function.
+    read_password DATABASE_PASSWORD "ENTER A PASSWORD TO USE FOR THE DATABASE."
+else
+    echo "No database enabled"
+fi
 
 
 # Queue Configuration
@@ -722,6 +748,16 @@
 fi
 
 
+# Nova
+# -----
+
+if is_service_enabled nova && [[ "$VIRT_DRIVER" == 'xenserver' ]]; then
+    # Look for the backend password here because read_password
+    # is not a library function.
+    read_password XENAPI_PASSWORD "ENTER A PASSWORD TO USE FOR XEN."
+fi
+
+
 # Swift
 # -----
 
@@ -760,26 +796,13 @@
     PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh
 fi
 
-# Install subunit for the subunit output stream
-pip_install -U os-testr
-
-TRACK_DEPENDS=${TRACK_DEPENDS:-False}
-
-# Install Python packages into a virtualenv so that we can track them
-if [[ $TRACK_DEPENDS = True ]]; then
-    echo_summary "Installing Python packages into a virtualenv $DEST/.venv"
-    pip_install -U virtualenv
-
-    rm -rf $DEST/.venv
-    virtualenv --system-site-packages $DEST/.venv
-    source $DEST/.venv/bin/activate
-    $DEST/.venv/bin/pip freeze > $DEST/requires-pre-pip
-fi
-
 # Do the ugly hacks for broken packages and distros
 source $TOP_DIR/tools/fixup_stuff.sh
 fixup_all
 
+# Install subunit for the subunit output stream
+pip_install -U os-testr
+
 if [[ "$USE_SYSTEMD" == "True" ]]; then
     pip_install_gr systemd-python
     # the default rate limit of 1000 messages / 30 seconds is not
@@ -794,6 +817,18 @@
 # Install required infra support libraries
 install_infra
 
+# Install bindep
+$VIRTUALENV_CMD $DEST/bindep-venv
+# TODO(ianw) : optionally install from zuul checkout?
+$DEST/bindep-venv/bin/pip install bindep
+export BINDEP_CMD=${DEST}/bindep-venv/bin/bindep
+
+# Install packages as defined in plugin bindep.txt files
+pkgs="$( _get_plugin_bindep_packages )"
+if [[ -n "${pkgs}" ]]; then
+    install_package ${pkgs}
+fi
+
 # Extras Pre-install
 # ------------------
 # Phase: pre-install
@@ -820,6 +855,18 @@
     install_etcd3
 fi
 
+# Setup TLS certs
+# ---------------
+
+# Do this early, before any webservers are set up to ensure
+# we don't run into problems with missing certs when apache
+# is restarted.
+if is_service_enabled tls-proxy; then
+    configure_CA
+    init_CA
+    init_cert
+fi
+
 # Check Out and Install Source
 # ----------------------------
 
@@ -844,13 +891,6 @@
     install_neutronclient
 fi
 
-# Setup TLS certs
-if is_service_enabled tls-proxy; then
-    configure_CA
-    init_CA
-    init_cert
-fi
-
 # Install middleware
 install_keystonemiddleware
 
@@ -868,12 +908,10 @@
     stack_install_service swift
     configure_swift
 
-    # swift3 middleware to provide S3 emulation to Swift
-    if is_service_enabled swift3; then
+    # s3api middleware to provide S3 emulation to Swift
+    if is_service_enabled s3api; then
         # Replace the nova-objectstore port by the swift port
         S3_SERVICE_PORT=8080
-        git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH
-        setup_develop $SWIFT3_DIR
     fi
 fi
 
@@ -894,8 +932,6 @@
     stack_install_service neutron
 fi
 
-# Nova configuration is used by placement so we need to create nova.conf
-# first.
 if is_service_enabled nova; then
     # Compute service
     stack_install_service nova
@@ -948,17 +984,6 @@
 # osc commands. Alias dies with stack.sh.
 install_oscwrap
 
-if [[ $TRACK_DEPENDS = True ]]; then
-    $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip
-    if ! diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then
-        echo "Detect some changes for installed packages of pip, in depend tracking mode"
-        cat $DEST/requires.diff
-    fi
-    echo "Ran stack.sh in depend tracking mode, bailing out now"
-    exit 0
-fi
-
-
 # Syslog
 # ------
 
@@ -1043,6 +1068,12 @@
 # A better kind of sysstat, with the top process per time slice
 start_dstat
 
+# Run a background tcpdump for debugging
+# Note: TCPDUMP_ARGS must be set when this service is enabled
+if is_service_enabled tcpdump; then
+    start_tcpdump
+fi
+
 # Etcd
 # -----
 
@@ -1144,10 +1175,11 @@
     fi
 fi
 
+
 # Nova
 # ----
 
-if is_service_enabled n-net q-dhcp; then
+if is_service_enabled q-dhcp; then
     # Delete traces of nova networks from prior runs
     # Do not kill any dnsmasq instance spawned by NetworkManager
     netman_pid=$(pidof NetworkManager || true)
@@ -1159,12 +1191,6 @@
 
     clean_iptables
 
-    if is_service_enabled n-net; then
-        rm -rf ${NOVA_STATE_PATH}/networks
-        sudo mkdir -p ${NOVA_STATE_PATH}/networks
-        safe_chown -R ${STACK_USER} ${NOVA_STATE_PATH}/networks
-    fi
-
     # Force IP forwarding on, just in case
     sudo sysctl -w net.ipv4.ip_forward=1
 fi
@@ -1203,13 +1229,11 @@
     init_nova
 
     # Additional Nova configuration that is dependent on other services
+    # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
+    # not, remove the if here
     if is_service_enabled neutron; then
         configure_neutron_nova
-    elif is_service_enabled n-net; then
-        create_nova_conf_nova_network
     fi
-
-    init_nova_cells
 fi
 
 
@@ -1291,20 +1315,6 @@
     echo_summary "Starting Neutron"
     configure_neutron_after_post_config
     start_neutron_service_and_check
-elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then
-    NM_CONF=${NOVA_CONF}
-    if is_service_enabled n-cell; then
-        NM_CONF=${NOVA_CELLS_CONF}
-    fi
-
-    # Create a small network
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS
-
-    # Create some floating ips
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create $FLOATING_RANGE --pool=$PUBLIC_NETWORK_NAME
-
-    # Create a second pool
-    $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF floating create --ip_range=$TEST_FLOATING_RANGE --pool=$TEST_FLOATING_POOL
 fi
 
 # Start placement before any of the service that are likely to want
@@ -1417,6 +1427,12 @@
         # environment is up.
         echo_summary "SKIPPING Cell setup because n-cpu is not enabled. You will have to do this manually before you have a working environment."
     fi
+    # Run the nova-status upgrade check command which can also be used
+    # to verify the base install. Note that this is good enough in a
+    # single node deployment, but in a multi-node setup it won't verify
+    # any subnodes - that would have to be driven from whatever tooling
+    # is deploying the subnodes, e.g. the zuul v3 devstack-multinode job.
+    $NOVA_BIN_DIR/nova-status --config-file $NOVA_CONF upgrade check
 fi
 
 # Run local script
@@ -1432,7 +1448,10 @@
 # ===============
 
 # Prepare bash completion for OSC
-openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
+# Note we use "command" to avoid the timing wrapper
+# which isn't relevant here and floods logs
+command openstack complete \
+    | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null
 
 # If cinder is configured, set global_filter for PV devices
 if is_service_enabled cinder; then
diff --git a/stackrc b/stackrc
index 34bd677..2d3a599 100644
--- a/stackrc
+++ b/stackrc
@@ -65,7 +65,7 @@
     # Keystone - nothing works without keystone
     ENABLED_SERVICES=key
     # Nova - services to support libvirt based openstack clouds
-    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-cauth,n-api-meta
+    ENABLED_SERVICES+=,n-api,n-cpu,n-cond,n-sch,n-novnc,n-api-meta
     # Placement service needed for Nova
     ENABLED_SERVICES+=,placement-api,placement-client
     # Glance services needed for Nova
@@ -89,6 +89,15 @@
 # Set the default Nova APIs to enable
 NOVA_ENABLED_APIS=osapi_compute,metadata
 
+# allow local overrides of env variables, including repo config
+if [[ -f $RC_DIR/localrc ]]; then
+    # Old-style user-supplied config
+    source $RC_DIR/localrc
+elif [[ -f $RC_DIR/.localrc.auto ]]; then
+    # New-style user-supplied config extracted from local.conf
+    source $RC_DIR/.localrc.auto
+fi
+
 # CELLSV2_SETUP - how we should configure services with cells v2
 #
 # - superconductor - this is one conductor for the api services, and
@@ -127,13 +136,7 @@
 fi
 
 # Control whether Python 3 should be used at all.
-export USE_PYTHON3=$(trueorfalse False USE_PYTHON3)
-
-# Control whether Python 3 is enabled for specific services by the
-# base name of the directory from which they are installed. See
-# enable_python3_package to edit this variable and use_python3_for to
-# test membership.
-export ENABLED_PYTHON3_PACKAGES="nova,glance,cinder,uwsgi,python-openstackclient,openstacksdk"
+export USE_PYTHON3=$(trueorfalse True USE_PYTHON3)
 
 # Explicitly list services not to run under Python 3. See
 # disable_python3_package to edit this variable.
@@ -149,13 +152,11 @@
 _DEFAULT_PYTHON2_VERSION="$(_get_python_version python2)"
 export PYTHON2_VERSION=${PYTHON2_VERSION:-${_DEFAULT_PYTHON2_VERSION:-2.7}}
 
-# allow local overrides of env variables, including repo config
-if [[ -f $RC_DIR/localrc ]]; then
-    # Old-style user-supplied config
-    source $RC_DIR/localrc
-elif [[ -f $RC_DIR/.localrc.auto ]]; then
-    # New-style user-supplied config extracted from local.conf
-    source $RC_DIR/.localrc.auto
+# Command to use when creating a virtualenv
+if [[ ${USE_PYTHON3} == True ]]; then
+    export VIRTUALENV_CMD="virtualenv -p python3"
+else
+    export VIRTUALENV_CMD="virtualenv "
 fi
 
 # Default for log coloring is based on interactive-or-not.
@@ -236,11 +237,10 @@
 # ------------
 
 # Base GIT Repo URL
-# Another option is https://git.openstack.org
-GIT_BASE=${GIT_BASE:-git://git.openstack.org}
+GIT_BASE=${GIT_BASE:-https://opendev.org}
 
 # The location of REQUIREMENTS once cloned
-REQUIREMENTS_DIR=$DEST/requirements
+REQUIREMENTS_DIR=${REQUIREMENTS_DIR:-$DEST/requirements}
 
 # Which libraries should we install from git instead of using released
 # versions on pypi?
@@ -258,7 +258,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="stein"
+DEVSTACK_SERIES="ussuri"
 
 ##############
 #
@@ -298,6 +298,10 @@
 SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git}
 SWIFT_BRANCH=${SWIFT_BRANCH:-$TARGET_BRANCH}
 
+# placement service
+PLACEMENT_REPO=${PLACEMENT_REPO:-${GIT_BASE}/openstack/placement.git}
+PLACEMENT_BRANCH=${PLACEMENT_BRANCH:-$TARGET_BRANCH}
+
 ##############
 #
 #  Testing Components
@@ -494,8 +498,8 @@
 GITBRANCH["tooz"]=${TOOZ_BRANCH:-$TARGET_BRANCH}
 
 # pbr drives the setuptools configs
-GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git}
-GITBRANCH["pbr"]=${PBR_BRANCH:-$TARGET_BRANCH}
+GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack/pbr.git}
+GITBRANCH["pbr"]=${PBR_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 
 
 ##################
@@ -516,10 +520,6 @@
 GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/keystonemiddleware.git}
 GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
 
-# s3 support for swift
-SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git}
-SWIFT3_BRANCH=${SWIFT3_BRANCH:-$TARGET_BRANCH}
-
 # ceilometer middleware
 GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git}
 GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-$TARGET_BRANCH}
@@ -554,7 +554,7 @@
 
 # diskimage-builder tool
 GITREPO["diskimage-builder"]=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git}
-GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$TARGET_BRANCH}
+GITBRANCH["diskimage-builder"]=${DIB_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 GITDIR["diskimage-builder"]=$DEST/diskimage-builder
 
 # neutron-lib library containing neutron stable non-REST interfaces
@@ -603,7 +603,7 @@
 
 # a websockets/html5 or flash powered VNC console for vm instances
 NOVNC_REPO=${NOVNC_REPO:-https://github.com/novnc/noVNC.git}
-NOVNC_BRANCH=${NOVNC_BRANCH:-stable/v0.6}
+NOVNC_BRANCH=${NOVNC_BRANCH:-v1.0.0}
 
 # a websockets/html5 or flash powered SPICE console for vm instances
 SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.git}
@@ -656,9 +656,6 @@
         ;;
 esac
 
-# By default, devstack will use Ubuntu Cloud Archive.
-ENABLE_UBUNTU_CLOUD_ARCHIVE=$(trueorfalse True ENABLE_UBUNTU_CLOUD_ARCHIVE)
-
 # Images
 # ------
 
@@ -681,7 +678,7 @@
 #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
 #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
-CIRROS_VERSION=${CIRROS_VERSION:-"0.3.5"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.4.0"}
 CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"}
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -732,10 +729,10 @@
 EXTRA_CACHE_URLS=""
 
 # etcd3 defaults
-ETCD_VERSION=${ETCD_VERSION:-v3.2.17}
-ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"0a75e794502e2e76417b19da2807a9915fa58dcbf0985e397741d570f4f305cd"}
-ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"0ab4621c44c79d17d94e43bd184d0f23b763a3669056ce4ae2d0b2942410a98f"}
-ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"69e1279c4a2a52256b78d2a8dd23346ac46b836e678b971a459f2afaef3c275e"}
+ETCD_VERSION=${ETCD_VERSION:-v3.3.12}
+ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"}
+ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"}
+ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"}
 # etcd v3.2.x doesn't have anything for s390x
 ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
 # Make sure etcd3 downloads the correct architecture
@@ -763,13 +760,17 @@
 fi
 ETCD_PORT=${ETCD_PORT:-2379}
 ETCD_PEER_PORT=${ETCD_PEER_PORT:-2380}
-ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/coreos/etcd/releases/download}
+ETCD_DOWNLOAD_URL=${ETCD_DOWNLOAD_URL:-https://github.com/etcd-io/etcd/releases/download}
 ETCD_NAME=etcd-$ETCD_VERSION-linux-$ETCD_ARCH
 ETCD_DOWNLOAD_FILE=$ETCD_NAME.tar.gz
 ETCD_DOWNLOAD_LOCATION=$ETCD_DOWNLOAD_URL/$ETCD_VERSION/$ETCD_DOWNLOAD_FILE
 # etcd is always required, so place it into list of pre-cached downloads
 EXTRA_CACHE_URLS+=",$ETCD_DOWNLOAD_LOCATION"
 
+# Cache settings
+CACHE_BACKEND=${CACHE_BACKEND:-"dogpile.cache.memcached"}
+MEMCACHE_SERVERS=${MEMCACHE_SERVERS:-"localhost:11211"}
+
 # Detect duplicate values in IMAGE_URLS
 for image_url in ${IMAGE_URLS//,/ }; do
     if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then
@@ -846,7 +847,6 @@
 FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24}
 IPV4_ADDRS_SAFE_TO_USE=${IPV4_ADDRS_SAFE_TO_USE:-10.0.0.0/22}
 FIXED_RANGE=${FIXED_RANGE:-$IPV4_ADDRS_SAFE_TO_USE}
-FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256}
 HOST_IP_IFACE=${HOST_IP_IFACE:-}
 HOST_IP=${HOST_IP:-}
 HOST_IPV6=${HOST_IPV6:-}
diff --git a/tests/test_functions.sh b/tests/test_functions.sh
index adf20cd..08143d2 100755
--- a/tests/test_functions.sh
+++ b/tests/test_functions.sh
@@ -272,7 +272,7 @@
 
     export_proxy_variables
     expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy")
-    results=$(env | egrep '(http(s)?|no)_proxy=')
+    results=$(env | egrep '(http(s)?|no)_proxy=' | sort)
     if [[ $expected = $results ]]; then
         passed "OK: Proxy variables are exported when proxy variables are set"
     else
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index f7dc89a..6ed1647 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -125,14 +125,14 @@
 assert_equal "$VAL" "33,44" "inset at EOF"
 
 # test empty option
-if ini_has_option ${TEST_INI} ddd empty; then
+if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then
     passed "ini_has_option: ddd.empty present"
 else
     failed "ini_has_option failed: ddd.empty not found"
 fi
 
 # test non-empty option
-if ini_has_option ${TEST_INI} bbb handlers; then
+if ini_has_option ${SUDO_ARG} ${TEST_INI} bbb handlers; then
     passed "ini_has_option: bbb.handlers present"
 else
     failed "ini_has_option failed: bbb.handlers not found"
diff --git a/tests/test_python.sh b/tests/test_python.sh
index 8652798..1f5453c 100755
--- a/tests/test_python.sh
+++ b/tests/test_python.sh
@@ -12,14 +12,9 @@
 echo "Testing Python 3 functions"
 
 # Initialize variables manipulated by functions under test.
-export ENABLED_PYTHON3_PACKAGES=""
 export DISABLED_PYTHON3_PACKAGES=""
 
-assert_false "should not be enabled yet" python3_enabled_for testpackage1
-
-enable_python3_package testpackage1
-assert_equal "$ENABLED_PYTHON3_PACKAGES" "testpackage1"  "unexpected result"
-assert_true "should be enabled" python3_enabled_for testpackage1
+assert_true "should be enabled by default" python3_enabled_for testpackage1
 
 assert_false "should not be disabled yet" python3_disabled_for testpackage2
 
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index c0b7ac7..919cacb 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -32,7 +32,7 @@
 source $TOP_DIR/stackrc
 
 # Give the non-root user the ability to run as **root** via ``sudo``
-is_package_installed sudo || install_package sudo
+is_package_installed sudo || is_package_installed sudo-ldap || install_package sudo
 
 [[ -z "$STACK_USER" ]] && die "STACK_USER is not set. Exiting."
 
diff --git a/tools/dstat.sh b/tools/dstat.sh
index 01c6d9b..e6cbb0f 100755
--- a/tools/dstat.sh
+++ b/tools/dstat.sh
@@ -12,8 +12,17 @@
 # Retrieve log directory as argument from calling script.
 LOGDIR=$1
 
+DSTAT_TOP_OPTS="--top-cpu-adv --top-io-adv --top-mem"
+if dstat --version | grep -q 'pcp-dstat' ; then
+    # dstat is unmaintained and is moving to a plugin of Performance
+    # Co-Pilot (pcp).  Fedora 29, for example, has rolled this out.  It is
+    # mostly compatible, except for a few options which are not
+    # implemented (yet?)
+    DSTAT_TOP_OPTS=""
+fi
+
 # Command line arguments for primary DStat process.
-DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv --top-mem --swap --tcp"
+DSTAT_OPTS="-tcmndrylpg ${DSTAT_TOP_OPTS} --swap --tcp"
 
 # Command-line arguments for secondary background DStat process.
 DSTAT_CSV_OPTS="-tcmndrylpg --tcp --output $LOGDIR/dstat-csv.log"
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index a939e30..eb8a76f 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -69,42 +69,19 @@
     fi
 }
 
-# Ubuntu Cloud Archive
-#---------------------
-# We've found that Libvirt on Xenial is flaky and crashes enough to be
-# a regular top e-r bug. Opt into Ubuntu Cloud Archive if on Xenial to
-# get newer Libvirt.
-# Make it possible to switch this based on an environment variable as
-# libvirt 2.5.0 doesn't handle nested virtualization quite well and this
-# is required for the trove development environment.
-function fixup_uca {
-    if [[ "${ENABLE_UBUNTU_CLOUD_ARCHIVE}" == "False" || "$DISTRO" != "xenial" ]]; then
+# Ubuntu Repositories
+#--------------------
+# Enable universe for bionic since it is missing when installing from ISO.
+function fixup_ubuntu {
+    if [[ "$DISTRO" != "bionic" ]]; then
         return
     fi
 
     # This pulls in apt-add-repository
     install_package "software-properties-common"
-    # Use UCA for newer libvirt.
-    if [[ -f /etc/ci/mirror_info.sh ]] ; then
-        # If we are on a nodepool provided host and it has told us about where
-        # we can find local mirrors then use that mirror.
-        source /etc/ci/mirror_info.sh
 
-        sudo apt-add-repository -y "deb $NODEPOOL_UCA_MIRROR xenial-updates/queens main"
-    else
-        # Otherwise use upstream UCA
-        sudo add-apt-repository -y cloud-archive:queens
-    fi
-
-    # Disable use of libvirt wheel since a cached wheel build might be
-    # against older libvirt binary.  Particularly a problem if using
-    # the openstack wheel mirrors, but can hit locally too.
-    # TODO(clarkb) figure out how to use upstream wheel again.
-    iniset -sudo /etc/pip.conf "global" "no-binary" "libvirt-python"
-
-    # Force update our APT repos, since we added UCA above.
-    REPOS_UPDATED=False
-    apt_get_update
+    # Enable universe
+    sudo add-apt-repository -y universe
 }
 
 # Python Packages
@@ -202,7 +179,19 @@
             # install requests with the bundled urllib3 to avoid conflicts
             pip_install --upgrade --force-reinstall requests
         fi
+
     fi
+
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency
+    sudo rm -rf /usr/lib/python2.7/site-packages/enum34*.egg-info
+    sudo rm -rf /usr/lib/python2.7/site-packages/ipaddress*.egg-info
+    sudo rm -rf /usr/lib/python2.7/site-packages/ply-*.egg-info
+    sudo rm -rf /usr/lib/python2.7/site-packages/typing-*.egg-info
 }
 
 function fixup_suse {
@@ -210,12 +199,33 @@
         return
     fi
 
-    # Disable apparmor profiles in openSUSE distros
-    # to avoid issues with haproxy and dnsmasq
-    if [ -x /usr/sbin/aa-enabled ] && sudo /usr/sbin/aa-enabled -q; then
-        sudo systemctl disable apparmor
+    # Deactivate and disable apparmor profiles in openSUSE and SLE
+    # distros to avoid issues with haproxy and dnsmasq.  In newer
+    # releases, systemctl stop apparmor is actually a no-op, so we
+    # have to use aa-teardown to make sure we've deactivated the
+    # profiles:
+    #
+    # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343
+    # https://gitlab.com/apparmor/apparmor/merge_requests/81
+    # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1
+    if sudo systemctl is-active -q apparmor; then
+        sudo systemctl stop apparmor
+    fi
+    if [ -x /usr/sbin/aa-teardown ]; then
         sudo /usr/sbin/aa-teardown
     fi
+    if sudo systemctl is-enabled -q apparmor; then
+        sudo systemctl disable apparmor
+    fi
+
+    # Since pip10, pip will refuse to uninstall files from packages
+    # that were created with distutils (rather than more modern
+    # setuptools).  This is because it technically doesn't have a
+    # manifest of what to remove.  However, in most cases, simply
+    # overwriting works.  So this hacks around those packages that
+    # have been dragged in by some other system dependency
+    sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info
+    sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info
 }
 
 # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has
@@ -235,7 +245,7 @@
 # looking for the mirror config script before doing this, and just
 # skip it if so.
 
-# [1] https://git.openstack.org/cgit/openstack/diskimage-builder/tree/ \
+# [1] https://opendev.org/openstack/diskimage-builder/src/branch/master/ \
 #        diskimage_builder/elements/pip-and-virtualenv/ \
 #            install.d/pip-and-virtualenv-source-install/04-install-pip
 # [2] https://bugzilla.redhat.com/show_bug.cgi?id=1477823
@@ -249,7 +259,7 @@
 
 function fixup_all {
     fixup_keystone
-    fixup_uca
+    fixup_ubuntu
     fixup_python_packages
     fixup_fedora
     fixup_suse
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index 56f12e7..d39b801 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -19,17 +19,21 @@
 #
 # In order to function correctly, the environment in which the
 # script runs must have
-#   * network access to the review.openstack.org Gerrit API
+#   * network access to the review.opendev.org Gerrit API
 #     working directory
-#   * network access to https://git.openstack.org/cgit
+#   * network access to https://opendev.org/
 
+import functools
 import logging
 import json
 import requests
 
+from requests.adapters import HTTPAdapter
+from requests.packages.urllib3.util.retry import Retry
+
 logging.basicConfig(level=logging.DEBUG)
 
-url = 'https://review.openstack.org/projects/'
+url = 'https://review.opendev.org/projects/'
 
 # This is what a project looks like
 '''
@@ -39,26 +43,36 @@
   },
 '''
 
-def is_in_openstack_namespace(proj):
-    # only interested in openstack namespace (e.g. not retired
+def is_in_wanted_namespace(proj):
+    # only interested in openstack or x namespace (e.g. not retired
     # stackforge, etc)
-    return proj.startswith('openstack/')
+    if proj.startswith('stackforge/') or \
+       proj.startswith('stackforge-attic/'):
+        return False
+    else:
+        return True
 
 # Check if this project has a plugin file
-def has_devstack_plugin(proj):
+def has_devstack_plugin(session, proj):
     # Don't link in the deb packaging repos
     if "openstack/deb-" in proj:
         return False
-    r = requests.get("https://git.openstack.org/cgit/%s/plain/devstack/plugin.sh" % proj)
+    r = session.get("https://opendev.org/%s/raw/branch/master/devstack/plugin.sh" % proj)
     return r.status_code == 200
 
 logging.debug("Getting project list from %s" % url)
 r = requests.get(url)
-projects = sorted(filter(is_in_openstack_namespace, json.loads(r.text[4:])))
+projects = sorted(filter(is_in_wanted_namespace, json.loads(r.text[4:])))
 logging.debug("Found %d projects" % len(projects))
 
-found_plugins = filter(has_devstack_plugin, projects)
+s = requests.Session()
+# sometimes gitea gives us a 500 error; retry sanely
+#  https://stackoverflow.com/a/35636367
+retries = Retry(total=3, backoff_factor=1,
+                status_forcelist=[500])
+s.mount('https://', HTTPAdapter(max_retries=retries))
+
+found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
 
 for project in found_plugins:
-    # strip of openstack/
-    print(project[10:])
+    print(project)
diff --git a/tools/generate-devstack-plugins-list.sh b/tools/generate-devstack-plugins-list.sh
index 95f1331..a3aa7ba 100755
--- a/tools/generate-devstack-plugins-list.sh
+++ b/tools/generate-devstack-plugins-list.sh
@@ -28,9 +28,9 @@
 #   * the environment variable git_dir pointing to the location
 #   * of said git repositories
 #   ) OR (
-#   * network access to the review.openstack.org Gerrit API
+#   * network access to the review.opendev.org Gerrit API
 #     working directory
-#   * network access to https://git.openstack.org/cgit
+#   * network access to https://opendev.org
 #   ))
 #
 # If a file named data/devstack-plugins-registry.header or
@@ -50,8 +50,6 @@
 }
 
 (
-declare -A plugins
-
 if [[ -r data/devstack-plugins-registry.header ]]; then
     cat data/devstack-plugins-registry.header
 fi
@@ -65,7 +63,7 @@
 # ====================== ===
 # Plugin Name            URL
 # ====================== ===
-# foobar                 `git://... <http://...>`__
+# foobar                 `https://... <https://...>`__
 # ...
 
 printf "\n\n"
@@ -74,8 +72,8 @@
 title_underline ${name_col_len}
 
 for plugin in ${sorted_plugins}; do
-    giturl="git://git.openstack.org/openstack/${plugin}"
-    gitlink="https://git.openstack.org/cgit/openstack/${plugin}"
+    giturl="https://opendev.org/${plugin}"
+    gitlink="https://opendev.org/${plugin}"
     printf "%-${name_col_len}s %s\n" "${plugin}" "\`${giturl} <${gitlink}>\`__"
 done
 
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index 1bd7392..2b6aa4c 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -35,7 +35,7 @@
 # done by openstack-infra diskimage-builder elements as part of image
 # preparation [1].  This prevents any network access, which can be
 # unreliable in CI situations.
-# [1] http://git.openstack.org/cgit/openstack-infra/project-config/tree/nodepool/elements/cache-devstack/source-repository-pip
+# [1] https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/cache-devstack/source-repository-pip
 
 PIP_GET_PIP_URL=${PIP_GET_PIP_URL:-"https://bootstrap.pypa.io/get-pip.py"}
 LOCAL_PIP="$FILES/$(basename $PIP_GET_PIP_URL)"
diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py
index eb7265f..9187c66 100755
--- a/tools/update_clouds_yaml.py
+++ b/tools/update_clouds_yaml.py
@@ -41,12 +41,19 @@
                 'auth_url': args.os_auth_url,
                 'username': args.os_username,
                 'password': args.os_password,
-                'project_name': args.os_project_name,
             },
         }
-        if args.os_identity_api_version == '3':
+        if args.os_project_name and args.os_system_scope:
+            print(
+                "WARNING: os_project_name and os_system_scope were both"
+                " given. os_system_scope will take priority.")
+        if args.os_project_name and not args.os_system_scope:
+            self._cloud_data['auth']['project_name'] = args.os_project_name
+        if args.os_identity_api_version == '3' and not args.os_system_scope:
             self._cloud_data['auth']['user_domain_id'] = 'default'
             self._cloud_data['auth']['project_domain_id'] = 'default'
+        if args.os_system_scope:
+            self._cloud_data['auth']['system_scope'] = args.os_system_scope
         if args.os_cacert:
             self._cloud_data['cacert'] = args.os_cacert
 
@@ -83,12 +90,13 @@
     parser.add_argument('--os-cloud', required=True)
     parser.add_argument('--os-region-name', default='RegionOne')
     parser.add_argument('--os-identity-api-version', default='3')
-    parser.add_argument('--os-volume-api-version', default='2')
+    parser.add_argument('--os-volume-api-version', default='3')
     parser.add_argument('--os-cacert')
     parser.add_argument('--os-auth-url', required=True)
     parser.add_argument('--os-username', required=True)
     parser.add_argument('--os-password', required=True)
-    parser.add_argument('--os-project-name', required=True)
+    parser.add_argument('--os-project-name')
+    parser.add_argument('--os-system-scope')
 
     args = parser.parse_args()
 
diff --git a/tools/worlddump.py b/tools/worlddump.py
index 7506082..d5ff5d1 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -25,6 +25,7 @@
 import fnmatch
 import os
 import os.path
+import shutil
 import subprocess
 import sys
 
@@ -163,7 +164,10 @@
 def network_dump():
     _header("Network Dump")
 
-    _dump_cmd("brctl show")
+    _dump_cmd("bridge link")
+    if _find_cmd("brctl"):
+        _dump_cmd("brctl show")
+    _dump_cmd("ip link show type bridge")
     ip_cmds = ["neigh", "addr", "link", "route"]
     for cmd in ip_cmds + ['netns']:
         _dump_cmd("ip %s" % cmd)
@@ -246,6 +250,14 @@
         compute_consoles()
         guru_meditation_reports()
         var_core()
+    # Singular name for ease of log retrieval
+    copyname = os.path.join(opts.dir, 'worlddump')
+    if opts.name:
+        copyname += '-' + opts.name
+    copyname += '-latest.txt'
+    # We make a full copy instead of a symlink because jobs that
+    # gzip their logs would break a symlink.
+    shutil.copyfile(fname, copyname)
 
 
 if __name__ == '__main__':
diff --git a/tools/xen/README.md b/tools/xen/README.md
index 22263bb..2873011 100644
--- a/tools/xen/README.md
+++ b/tools/xen/README.md
@@ -1,3 +1,3 @@
 Note: XenServer-related tools have been moved to `os-xenapi`_ and are maintained there.
 
-.. _os-xenapi: https://github.com/openstack/os-xenapi/
+.. _os-xenapi: https://opendev.org/x/os-xenapi/
diff --git a/tox.ini b/tox.ini
index f643fdb..26baa2a 100644
--- a/tox.ini
+++ b/tox.ini
@@ -41,7 +41,16 @@
 setenv =
   TOP_DIR={toxinidir}
 commands =
-  python setup.py build_sphinx
+  sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html
+
+[testenv:pdf-docs]
+basepython = python3
+deps = {[testenv:docs]deps}
+whitelist_externals =
+   make
+commands =
+   sphinx-build -W -b latex doc/source doc/build/pdf
+   make -C doc/build/pdf
 
 [testenv:venv]
 basepython = python3