Merge "ovn: use bundled ovs"
diff --git a/.gitignore b/.gitignore
index 8fe56ad..ad153f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -38,3 +38,5 @@
 userrc_early
 AUTHORS
 ChangeLog
+tools/dbcounter/build/
+tools/dbcounter/dbcounter.egg-info/
diff --git a/.zuul.yaml b/.zuul.yaml
index 1c517f1..59a577e 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,16 +1,18 @@
-- pragma:
-   # NOTE(gtema): this is required for the changes in SDK feature/r1 branch to
-   # be using devstack
-   # TODO(gtema): delete this once r1 branch is merged into master
-    implied-branches:
-      - master
-      - feature/r1
-
 - nodeset:
-    name: openstack-single-node
+    name: openstack-single-node-jammy
     nodes:
       - name: controller
-        label: ubuntu-xenial
+        label: ubuntu-jammy
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: openstack-single-node-noble
+    nodes:
+      - name: controller
+        label: ubuntu-noble
     groups:
       - name: tempest
         nodes:
@@ -37,36 +39,6 @@
           - controller
 
 - nodeset:
-    name: openstack-single-node-xenial
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
-    name: devstack-single-node-centos-7
-    nodes:
-      - name: controller
-        label: centos-7
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
-    name: devstack-single-node-centos-8-stream
-    nodes:
-      - name: controller
-        label: centos-8-stream
-    groups:
-      - name: tempest
-        nodes:
-          - controller
-
-- nodeset:
     name: devstack-single-node-centos-9-stream
     nodes:
       - name: controller
@@ -87,15 +59,18 @@
           - controller
 
 - nodeset:
-    name: devstack-single-node-fedora-latest
+    name: devstack-single-node-debian-bookworm
     nodes:
       - name: controller
-        label: fedora-35
+        label: debian-bookworm
     groups:
       - name: tempest
         nodes:
           - controller
 
+# NOTE(sean-k-mooney): this is still used by horizon for
+# horizon-integration-tests, horizon-integration-pytest and
+# horizon-ui-pytest; remove when horizon is updated.
 - nodeset:
     name: devstack-single-node-debian-bullseye
     nodes:
@@ -107,76 +82,16 @@
           - controller
 
 - nodeset:
-    name: devstack-single-node-openeuler-20.03-sp2
+    name: devstack-single-node-rockylinux-9
     nodes:
       - name: controller
-        label: openEuler-20-03-LTS-SP2
+        label: rockylinux-9
     groups:
       - name: tempest
         nodes:
           - controller
 
 - nodeset:
-    name: openstack-two-node
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-      - name: compute1
-        label: ubuntu-xenial
-    groups:
-      # Node where tests are executed and test results collected
-      - name: tempest
-        nodes:
-          - controller
-      # Nodes running the compute service
-      - name: compute
-        nodes:
-          - controller
-          - compute1
-      # Nodes that are not the controller
-      - name: subnode
-        nodes:
-          - compute1
-      # Switch node for multinode networking setup
-      - name: switch
-        nodes:
-          - controller
-      # Peer nodes for multinode networking setup
-      - name: peers
-        nodes:
-          - compute1
-
-- nodeset:
-    name: openstack-two-node-centos-8-stream
-    nodes:
-      - name: controller
-        label: centos-8-stream
-      - name: compute1
-        label: centos-8-stream
-    groups:
-      # Node where tests are executed and test results collected
-      - name: tempest
-        nodes:
-          - controller
-      # Nodes running the compute service
-      - name: compute
-        nodes:
-          - controller
-          - compute1
-      # Nodes that are not the controller
-      - name: subnode
-        nodes:
-          - compute1
-      # Switch node for multinode networking setup
-      - name: switch
-        nodes:
-          - controller
-      # Peer nodes for multinode networking setup
-      - name: peers
-        nodes:
-          - compute1
-
-- nodeset:
     name: openstack-two-node-centos-9-stream
     nodes:
       - name: controller
@@ -207,6 +122,66 @@
           - compute1
 
 - nodeset:
+    name: openstack-two-node-jammy
+    nodes:
+      - name: controller
+        label: ubuntu-jammy
+      - name: compute1
+        label: ubuntu-jammy
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
+    name: openstack-two-node-noble
+    nodes:
+      - name: controller
+        label: ubuntu-noble
+      - name: compute1
+        label: ubuntu-noble
+    groups:
+      # Node where tests are executed and test results collected
+      - name: tempest
+        nodes:
+          - controller
+      # Nodes running the compute service
+      - name: compute
+        nodes:
+          - controller
+          - compute1
+      # Nodes that are not the controller
+      - name: subnode
+        nodes:
+          - compute1
+      # Switch node for multinode networking setup
+      - name: switch
+        nodes:
+          - controller
+      # Peer nodes for multinode networking setup
+      - name: peers
+        nodes:
+          - compute1
+
+- nodeset:
     name: openstack-two-node-focal
     nodes:
       - name: controller
@@ -267,36 +242,6 @@
           - compute1
 
 - nodeset:
-    name: openstack-two-node-xenial
-    nodes:
-      - name: controller
-        label: ubuntu-xenial
-      - name: compute1
-        label: ubuntu-xenial
-    groups:
-      # Node where tests are executed and test results collected
-      - name: tempest
-        nodes:
-          - controller
-      # Nodes running the compute service
-      - name: compute
-        nodes:
-          - controller
-          - compute1
-      # Nodes that are not the controller
-      - name: subnode
-        nodes:
-          - compute1
-      # Switch node for multinode networking setup
-      - name: switch
-        nodes:
-          - controller
-      # Peer nodes for multinode networking setup
-      - name: peers
-        nodes:
-          - compute1
-
-- nodeset:
     name: openstack-three-node-focal
     nodes:
       - name: controller
@@ -368,7 +313,7 @@
 
 - job:
     name: devstack-base
-    parent: multinode
+    parent: openstack-multinode-fips
     abstract: true
     description: |
       Base abstract Devstack job.
@@ -383,7 +328,6 @@
     required-projects:
       - opendev.org/openstack/devstack
     roles:
-      - zuul: opendev.org/openstack/devstack-gate
       - zuul: opendev.org/openstack/openstack-zuul-jobs
     vars:
       devstack_localrc:
@@ -417,8 +361,10 @@
         '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
         '{{ devstack_log_dir }}/tcpdump.pcap': logs
         '{{ devstack_log_dir }}/worlddump-latest.txt': logs
+        '{{ devstack_log_dir }}/qemu.coredump': logs
         '{{ devstack_full_log}}': logs
         '{{ stage_dir }}/verify_tempest_conf.log': logs
+        '{{ stage_dir }}/performance.json': logs
         '{{ stage_dir }}/apache': logs
         '{{ stage_dir }}/apache_config': logs
         '{{ stage_dir }}/etc': logs
@@ -427,6 +373,7 @@
         /var/log/mysql: logs
         /var/log/libvirt: logs
         /etc/libvirt: logs
+        /etc/lvm: logs
         /etc/sudoers: logs
         /etc/sudoers.d: logs
         '{{ stage_dir }}/iptables.txt': logs
@@ -437,6 +384,7 @@
         '{{ stage_dir }}/rpm-qa.txt': logs
         '{{ stage_dir }}/core': logs
         '{{ stage_dir }}/listen53.txt': logs
+        '{{ stage_dir }}/services.txt': logs
         '{{ stage_dir }}/deprecations.log': logs
         '{{ stage_dir }}/audit.log': logs
         /etc/ceph: logs
@@ -491,7 +439,7 @@
     description: |
       Minimal devstack base job, intended for use by jobs that need
       less than the normal minimum set of required-projects.
-    nodeset: openstack-single-node-focal
+    nodeset: openstack-single-node-jammy
     required-projects:
       - opendev.org/openstack/requirements
     vars:
@@ -505,14 +453,18 @@
         dstat: false
         etcd3: true
         memory_tracker: true
+        file_tracker: true
         mysql: true
         rabbit: true
+        openstack-cli-server: true
     group-vars:
       subnode:
         devstack_services:
           # Shared services
           dstat: false
           memory_tracker: true
+          file_tracker: true
+          openstack-cli-server: true
         devstack_localrc:
           # Multinode specific settings
           HOST_IP: "{{ hostvars[inventory_hostname]['nodepool']['private_ipv4'] }}"
@@ -558,8 +510,17 @@
       - opendev.org/openstack/nova
       - opendev.org/openstack/placement
       - opendev.org/openstack/swift
+      - opendev.org/openstack/os-test-images
     timeout: 7200
     vars:
+      # Based on observation of the integrated gate,
+      # tempest-integrated-compute was only using ~1.7GB of swap;
+      # when zswap and the host tuning are enabled that increases
+      # slightly to ~2GB. We are setting the swap size to 8GB to
+      # be safe and account for more complex scenarios.
+      # We should revisit this value after some time to see if we
+      # can reduce it.
+      configure_swap_size: 8192
       devstack_localrc:
         # Common OpenStack services settings
         SWIFT_REPLICAS: 1
@@ -568,6 +529,26 @@
         DEBUG_LIBVIRT_COREDUMPS: true
         NOVA_VNC_ENABLED: true
         OVN_DBS_LOG_LEVEL: dbg
+        # Tune the host to optimize memory usage and hide IO latency.
+        # These settings configure the kernel to treat the host page
+        # cache and swap with equal priority and to prefer deferring
+        # writes, by changing the default swappiness, dirty_ratio and
+        # vfs_cache_pressure.
+        ENABLE_SYSCTL_MEM_TUNING: true
+        # The net tuning optimizes IPv4 TCP fast open and sets the default
+        # qdisc policy to pfifo_fast, which effectively disables all QoS.
+        # This minimizes the CPU load of the host network stack.
+        ENABLE_SYSCTL_NET_TUNING: true
+        # zswap allows the kernel to compress pages in memory before swapping
+        # them to disk. This can reduce the amount of swap used and improve
+        # performance. Effectively this trades a small amount of CPU for an
+        # increase in swap performance by reducing the amount of data
+        # written to disk. The overall speedup is proportional to the
+        # compression ratio and the speed of the swap device.
+        ENABLE_ZSWAP: true
+        # NOTE: this option is ignored when not using nova with the libvirt
+        # virt driver.
+        NOVA_LIBVIRT_TB_CACHE_SIZE: 128
       devstack_local_conf:
         post-config:
           $NEUTRON_CONF:
@@ -580,6 +561,7 @@
         dstat: false
         etcd3: true
         memory_tracker: true
+        file_tracker: true
         mysql: true
         rabbit: true
         tls-proxy: true
@@ -629,6 +611,7 @@
           # Shared services
           dstat: false
           memory_tracker: true
+          file_tracker: true
           tls-proxy: true
           # Nova services
           n-cpu: true
@@ -658,16 +641,38 @@
           Q_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
           NOVA_VNC_ENABLED: true
           ENABLE_CHASSIS_AS_GW: false
+          # Tune the host to optimize memory usage and hide IO latency.
+          # These settings configure the kernel to treat the host page
+          # cache and swap with equal priority and to prefer deferring
+          # writes, by changing the default swappiness, dirty_ratio and
+          # vfs_cache_pressure.
+          ENABLE_SYSCTL_MEM_TUNING: true
+          # The net tuning optimizes IPv4 TCP fast open and sets the default
+          # qdisc policy to pfifo_fast, which effectively disables all QoS.
+          # This minimizes the CPU load of the host network stack.
+          ENABLE_SYSCTL_NET_TUNING: true
+          # zswap allows the kernel to compress pages in memory before swapping
+          # them to disk. This can reduce the amount of swap used and improve
+          # performance. Effectively this trades a small amount of CPU for an
+          # increase in swap performance by reducing the amount of data
+          # written to disk. The overall speedup is proportional to the
+          # compression ratio and the speed of the swap device.
+          ENABLE_ZSWAP: true
+          # NOTE: this option is ignored when not using nova with the libvirt
+          # virt driver.
+          NOVA_LIBVIRT_TB_CACHE_SIZE: 128
 
 - job:
     name: devstack-ipv6
     parent: devstack
     description: |
-      Devstack single node job for integration gate with IPv6.
+      Devstack single node job for the integration gate with IPv6;
+      all services and tunnels use IPv6 addresses.
     vars:
       devstack_localrc:
         SERVICE_IP_VERSION: 6
         SERVICE_HOST: ""
+        TUNNEL_IP_VERSION: 6
 
 - job:
     name: devstack-enforce-scope
@@ -676,15 +681,12 @@
       This job runs the devstack with scope checks enabled.
     vars:
       devstack_localrc:
-        # Keep enabeling the services here to run with system scope
-        CINDER_ENFORCE_SCOPE: true
-        GLANCE_ENFORCE_SCOPE: true
-        NEUTRON_ENFORCE_SCOPE: true
+        ENFORCE_SCOPE: true
 
 - job:
     name: devstack-multinode
     parent: devstack
-    nodeset: openstack-two-node-focal
+    nodeset: openstack-two-node-jammy
     description: |
       Simple multinode test to verify multinode functionality on devstack side.
       This is not meant to be used as a parent job.
@@ -694,35 +696,67 @@
 # and these platforms don't have the round-the-clock support to avoid
 # becoming blockers in that situation.
 - job:
-    name: devstack-platform-centos-8-stream
-    parent: tempest-full-py3
-    description: CentOS 8 Stream platform test
-    nodeset: devstack-single-node-centos-8-stream
-    voting: false
-    timeout: 9000
-    vars:
-      configure_swap_size: 4096
-
-- job:
     name: devstack-platform-centos-9-stream
     parent: tempest-full-py3
     description: CentOS 9 Stream platform test
     nodeset: devstack-single-node-centos-9-stream
     timeout: 9000
+    voting: false
+
+- job:
+    name: devstack-platform-debian-bookworm
+    parent: tempest-full-py3
+    description: Debian Bookworm platform test
+    nodeset: devstack-single-node-debian-bookworm
+    timeout: 9000
     vars:
       configure_swap_size: 4096
 
 - job:
-    name: devstack-platform-debian-bullseye
+    name: devstack-platform-rocky-blue-onyx
     parent: tempest-full-py3
-    description: Debian Bullseye platform test
-    nodeset: devstack-single-node-debian-bullseye
+    description: Rocky Linux 9 Blue Onyx platform test
+    nodeset: devstack-single-node-rockylinux-9
+    timeout: 9000
+    # NOTE(danms): This has been failing lately with some repository metadata
+    # errors. We're marking this as non-voting until it appears to have
+    # stabilized:
+    # https://zuul.openstack.org/builds?job_name=devstack-platform-rocky-blue-onyx&skip=0
+    voting: false
+    vars:
+      configure_swap_size: 4096
+
+- job:
+    name: devstack-platform-ubuntu-noble
+    parent: tempest-full-py3
+    description: Ubuntu 24.04 LTS (noble) platform test
+    nodeset: openstack-single-node-noble
+    timeout: 9000
+    voting: false
+    vars:
+      configure_swap_size: 8192
+
+- job:
+    name: devstack-platform-ubuntu-jammy-ovn-source
+    parent: devstack-platform-ubuntu-jammy
+    description: Ubuntu 22.04 LTS (jammy) platform test (OVN from source)
+    voting: false
+    vars:
+      devstack_localrc:
+        OVN_BUILD_FROM_SOURCE: True
+        OVN_BRANCH: "v21.06.0"
+        OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+        OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
+
+- job:
+    name: devstack-platform-ubuntu-jammy-ovs
+    parent: tempest-full-py3
+    description: Ubuntu 22.04 LTS (jammy) platform test (OVS)
+    nodeset: openstack-single-node-jammy
     voting: false
     timeout: 9000
     vars:
-      configure_swap_size: 4096
-      # NOTE(yoctozepto): Debian Bullseye does not yet offer OVN. Switch to OVS
-      # for the time being.
+      configure_swap_size: 8192
       devstack_localrc:
         Q_AGENT: openvswitch
         Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
@@ -754,20 +788,6 @@
           q-agt: true
 
 - job:
-    name: devstack-platform-openEuler-20.03-SP2
-    parent: tempest-full-py3
-    description: openEuler 20.03 SP2 platform test
-    nodeset: devstack-single-node-openeuler-20.03-sp2
-    voting: false
-    timeout: 9000
-    vars:
-      configure_swap_size: 4096
-      devstack_localrc:
-        # NOTE(wxy):  OVN package is not supported by openEuler yet. Build it
-        # from source instead.
-        OVN_BUILD_FROM_SOURCE: True
-
-- job:
     name: devstack-no-tls-proxy
     parent: tempest-full-py3
     description: |
@@ -779,30 +799,6 @@
         tls-proxy: false
 
 - job:
-    name: devstack-platform-fedora-latest
-    parent: tempest-full-py3
-    description: Fedora latest platform test
-    nodeset: devstack-single-node-fedora-latest
-    voting: false
-    vars:
-      configure_swap_size: 4096
-      # Python 3.10 dependency issues; see
-      # https://bugs.launchpad.net/horizon/+bug/1960204
-      devstack_services:
-        horizon: false
-
-- job:
-    name: devstack-platform-fedora-latest-virt-preview
-    parent: tempest-full-py3
-    description: Fedora latest platform test using the virt-preview repo.
-    nodeset: devstack-single-node-fedora-latest
-    voting: false
-    vars:
-      configure_swap_size: 4096
-      devstack_localrc:
-        ENABLE_FEDORA_VIRT_PREVIEW_REPO: true
-
-- job:
     name: devstack-tox-base
     parent: devstack
     description: |
@@ -858,7 +854,7 @@
 
 - job:
     name: devstack-unit-tests
-    nodeset: ubuntu-focal
+    nodeset: ubuntu-jammy
     description: |
       Runs unit tests on devstack project.
 
@@ -875,10 +871,12 @@
         - devstack
         - devstack-ipv6
         - devstack-enforce-scope
-        - devstack-platform-fedora-latest
-        - devstack-platform-centos-8-stream
         - devstack-platform-centos-9-stream
-        - devstack-platform-debian-bullseye
+        - devstack-platform-debian-bookworm
+        - devstack-platform-rocky-blue-onyx
+        - devstack-platform-ubuntu-jammy-ovn-source
+        - devstack-platform-ubuntu-jammy-ovs
+        - devstack-platform-ubuntu-noble
         - devstack-multinode
         - devstack-unit-tests
         - openstack-tox-bashate
@@ -892,10 +890,6 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - neutron-linuxbridge-tempest:
-            irrelevant-files:
-              - ^.*\.rst$
-              - ^doc/.*$
         - neutron-ovn-tempest-ovs-release:
             voting: false
             irrelevant-files:
@@ -922,7 +916,10 @@
       jobs:
         - devstack
         - devstack-ipv6
-        - devstack-platform-centos-9-stream
+        - devstack-platform-debian-bookworm
+        # NOTE(danms): Disabled due to instability, see comment in the job
+        # definition above.
+        # - devstack-platform-rocky-blue-onyx
         - devstack-enforce-scope
         - devstack-multinode
         - devstack-unit-tests
@@ -931,10 +928,6 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - neutron-linuxbridge-tempest:
-            irrelevant-files:
-              - ^.*\.rst$
-              - ^doc/.*$
         - ironic-tempest-bios-ipmi-direct-tinyipa
         - swift-dsvm-functional
         - grenade:
@@ -958,7 +951,9 @@
     # pruned.
     #
     # * nova-next: maintained by nova for unreleased/undefaulted
-    #    things
+    #    things; this job is not experimental but is often used to test
+    #    features that are not yet production ready or to verify what will
+    #    become the new default after a deprecation period ends.
     # * neutron-fullstack-with-uwsgi: maintained by neutron for fullstack test
     #    when neutron-api is served by uwsgi, it's in exprimental for testing.
     #    the next cycle we can remove this  job if things turn out to be
@@ -966,23 +961,18 @@
     # * neutron-functional-with-uwsgi: maintained by neutron for functional
     #    test. Next cycle we can remove this one if things turn out to be
     #    stable engouh with uwsgi.
-    # * neutron-tempest-with-uwsgi: maintained by neutron for tempest test.
+    # * neutron-ovn-tempest-with-uwsgi: maintained by neutron for tempest test.
     #    Next cycle we can remove this if everything run out stable enough.
-    # * nova-multi-cell: maintained by nova and currently non-voting in the
+    # * nova-multi-cell: maintained by nova and is now voting in the
     #    check queue for nova changes but relies on devstack configuration
-    # * devstack-platform-fedora-latest-virt-preview: Maintained by lyarwood
-    #    for Nova to allow early testing of the latest versions of Libvirt and
-    #    QEMU. Should only graduate out of experimental if it ever moves into
-    #    the check queue for Nova.
 
     experimental:
       jobs:
-        - devstack-platform-openEuler-20.03-SP2
         - nova-multi-cell
         - nova-next
         - neutron-fullstack-with-uwsgi
         - neutron-functional-with-uwsgi
-        - neutron-tempest-with-uwsgi
+        - neutron-ovn-tempest-with-uwsgi
         - devstack-plugin-ceph-tempest-py3:
             irrelevant-files:
               - ^.*\.rst$
@@ -1003,8 +993,15 @@
             irrelevant-files:
               - ^.*\.rst$
               - ^doc/.*$
-        - devstack-platform-fedora-latest-virt-preview
         - devstack-no-tls-proxy
     periodic:
       jobs:
         - devstack-no-tls-proxy
+    periodic-weekly:
+      jobs:
+        - devstack-platform-centos-9-stream
+        - devstack-platform-debian-bookworm
+        - devstack-platform-rocky-blue-onyx
+        - devstack-platform-ubuntu-jammy-ovn-source
+        - devstack-platform-ubuntu-jammy-ovs
+        - devstack-platform-ubuntu-noble
diff --git a/README.rst b/README.rst
index f3a585a..86b85da 100644
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@
 Goals
 =====
 
-* To quickly build dev OpenStack environments in a clean Ubuntu or Fedora
+* To quickly build dev OpenStack environments in a clean Ubuntu or Rocky Linux
   environment
 * To describe working configurations of OpenStack (which code branches
   work together?  what do config files look like for those branches?)
@@ -28,9 +28,9 @@
 The DevStack master branch generally points to trunk versions of OpenStack
 components.  For older, stable versions, look for branches named
 stable/[release] in the DevStack repo.  For example, you can do the
-following to create a Pike OpenStack cloud::
+following to create a Zed OpenStack cloud::
 
-    git checkout stable/pike
+    git checkout stable/zed
     ./stack.sh
 
 You can also pick specific OpenStack project releases by setting the appropriate
@@ -55,7 +55,7 @@
 endpoints, like so:
 
 * Horizon: http://myhost/
-* Keystone: http://myhost/identity/v2.0/
+* Keystone: http://myhost/identity/v3/
 
 We also provide an environment file that you can use to interact with your
 cloud via CLI::
diff --git a/clean.sh b/clean.sh
index 870dfd4..6a31cc6 100755
--- a/clean.sh
+++ b/clean.sh
@@ -50,7 +50,6 @@
 source $TOP_DIR/lib/cinder
 source $TOP_DIR/lib/swift
 source $TOP_DIR/lib/neutron
-source $TOP_DIR/lib/neutron-legacy
 
 set -o xtrace
 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index dd8f21f..a83b2de 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -181,6 +181,9 @@
 If the ``*_PASSWORD`` variables are not set here you will be prompted to
 enter values for them by ``stack.sh``.
 
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
 The network ranges must not overlap with any networks in use on the
 host. Overlap is not uncommon as RFC-1918 'private' ranges are commonly
 used for both the local networking and Nova's fixed and floating ranges.
@@ -279,7 +282,7 @@
 
 ::
 
-    LOGDAYS=1
+    LOGDAYS=2
 
 Some coloring is used during the DevStack runs to make it easier to
 see what is going on. This can be disabled with::
@@ -521,8 +524,8 @@
 can be configured with any valid IPv6 prefix. The default values make
 use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193.
 
-Service Version
-~~~~~~~~~~~~~~~
+Service IP Version
+~~~~~~~~~~~~~~~~~~
 
 DevStack can enable service operation over either IPv4 or IPv6 by
 setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or
@@ -542,6 +545,27 @@
 
   HOST_IPV6=${some_local_ipv6_address}
 
+Tunnel IP Version
+~~~~~~~~~~~~~~~~~
+
+DevStack can enable tunnel operation over either IPv4 or IPv6 by
+setting ``TUNNEL_IP_VERSION`` to either ``TUNNEL_IP_VERSION=4`` or
+``TUNNEL_IP_VERSION=6`` respectively.
+
+When set to ``4`` Neutron will use an IPv4 address for tunnel endpoints,
+for example, ``HOST_IP``.
+
+When set to ``6`` Neutron will use an IPv6 address for tunnel endpoints,
+for example, ``HOST_IPV6``.
+
+The default value for this setting is ``4``. Dual-mode support (for
+example ``4+6``) is not supported, as this value must match the address
+family of the local tunnel endpoint address.
+
+The value of ``TUNNEL_IP_VERSION`` determines the default for
+``TUNNEL_ENDPOINT_IP``, which will be ``HOST_IP`` when the version is
+``4`` and ``HOST_IPV6`` when it is ``6``.
+
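+For example, to run tunnels over IPv6 you might set the following in
+``local.conf`` (a sketch; ``HOST_IPV6`` must be a real address on the host)::
+
+  TUNNEL_IP_VERSION=6
+  HOST_IPV6=${some_local_ipv6_address}
+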
 Multi-node setup
 ~~~~~~~~~~~~~~~~
 
@@ -615,7 +639,7 @@
 ::
 
     $ cd /opt/stack/tempest
-    $ tox -efull  tempest.scenario.test_network_basic_ops
+    $ tox -e smoke
 
 By default tempest is downloaded and the config file is generated, but the
 tempest package is not installed in the system's global site-packages (the
@@ -648,6 +672,35 @@
 or ``CINDER_QUOTA_SNAPSHOTS`` to the desired value.  (The default for
 each is 10.)
 
+DevStack's Cinder LVM configuration module currently supports both iSCSI and
+NVMe connections, and we can choose which one to use with options
+``CINDER_TARGET_HELPER``, ``CINDER_TARGET_PROTOCOL``, ``CINDER_TARGET_PREFIX``,
+and ``CINDER_TARGET_PORT``.
+
+Defaults use iSCSI with the LIO target manager::
+
+  CINDER_TARGET_HELPER="lioadm"
+  CINDER_TARGET_PROTOCOL="iscsi"
+  CINDER_TARGET_PREFIX="iqn.2010-10.org.openstack:"
+  CINDER_TARGET_PORT=3260
+
+Additionally there are 3 supported transport protocols for NVMe:
+``nvmet_rdma``, ``nvmet_tcp``, and ``nvmet_fc``. When the ``nvmet`` target
+helper is selected, the protocol, prefix, and port change to defaults more
+suitable for NVMe::
+
+  CINDER_TARGET_HELPER="nvmet"
+  CINDER_TARGET_PROTOCOL="nvmet_rdma"
+  CINDER_TARGET_PREFIX="nvme-subsystem-1"
+  CINDER_TARGET_PORT=4420
+
+When the RDMA transport protocol is selected, DevStack will create a Soft-RoCE
+device on Cinder nodes on top of ``HOST_IP_IFACE``, or, if that is not defined,
+on top of the interface holding the ``HOST_IP`` or ``HOST_IPV6`` address.
+
+On the Nova compute side this Soft-RoCE device is always created, since we
+cannot tell beforehand whether there will be an RDMA connection or not.
+
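+For example, to use NVMe over TCP instead of the RDMA default, a
+``local.conf`` could override just the transport protocol (a sketch using the
+variables documented above)::
+
+  CINDER_TARGET_HELPER="nvmet"
+  CINDER_TARGET_PROTOCOL="nvmet_tcp"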
 
 Keystone
 ~~~~~~~~
@@ -698,7 +751,7 @@
 
 ::
 
-    openstack --os-cloud devstack-system-admin registered limit update \
+    openstack --os-cloud devstack-system-admin registered limit set \
       --service glance --default-limit 5000 --region RegionOne image_size_total
 
 .. _arch-configuration:
diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst
index 4de238f..8b5a85b 100644
--- a/doc/source/contributor/contributing.rst
+++ b/doc/source/contributor/contributing.rst
@@ -42,8 +42,9 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 All changes proposed to the Devstack require two ``Code-Review +2`` votes from
 Devstack core reviewers before one of the core reviewers can approve the patch
-by giving ``Workflow +1`` vote. One exception is for patches to unblock the gate
-which can be approved by single core reviewers.
+by giving a ``Workflow +1`` vote. There are two exceptions: patches that
+unblock the gate and patches that do not touch Devstack's core logic
+(for example, old job cleanups) can be approved by a single core reviewer.
 
 Project Team Lead Duties
 ~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/source/debugging.rst b/doc/source/debugging.rst
index fd0d9cd..3ca0ad9 100644
--- a/doc/source/debugging.rst
+++ b/doc/source/debugging.rst
@@ -20,6 +20,12 @@
 falling (i.e. processes are consuming memory).  It also provides
 output showing locked (unswappable) memory.
 
+file_tracker
+------------
+
+The ``file_tracker`` service periodically monitors the number of
+open files in the system.
+
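+The service can be enabled or disabled from ``local.conf`` like any other
+DevStack service, for example (a sketch)::
+
+  enable_service file_tracker
+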
 tcpdump
 -------
 
diff --git a/doc/source/guides.rst b/doc/source/guides.rst
index e7ec629..e7b46b6 100644
--- a/doc/source/guides.rst
+++ b/doc/source/guides.rst
@@ -20,7 +20,7 @@
    guides/neutron
    guides/devstack-with-nested-kvm
    guides/nova
-   guides/devstack-with-lbaas-v2
+   guides/devstack-with-octavia
    guides/devstack-with-ldap
 
 All-In-One Single VM
@@ -69,10 +69,10 @@
 
 Guide to working with nova features :doc:`Nova and devstack <guides/nova>`.
 
-Configure Load-Balancer Version 2
------------------------------------
+Configure Octavia
+-----------------
 
-Guide on :doc:`Configure Load-Balancer Version 2 <guides/devstack-with-lbaas-v2>`.
+Guide on :doc:`Configure Octavia <guides/devstack-with-octavia>`.
 
 Deploying DevStack with LDAP
 ----------------------------
diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst
deleted file mode 100644
index 5d96ca7..0000000
--- a/doc/source/guides/devstack-with-lbaas-v2.rst
+++ /dev/null
@@ -1,145 +0,0 @@
-Devstack with Octavia Load Balancing
-====================================
-
-Starting with the OpenStack Pike release, Octavia is now a standalone service
-providing load balancing services for OpenStack.
-
-This guide will show you how to create a devstack with `Octavia API`_ enabled.
-
-.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
-
-Phase 1: Create DevStack + 2 nova instances
---------------------------------------------
-
-First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space,
-make sure it is updated. Install git and any other developer tools you find
-useful.
-
-Install devstack
-
-::
-
-    git clone https://opendev.org/openstack/devstack
-    cd devstack/tools
-    sudo ./create-stack-user.sh
-    cd ../..
-    sudo mv devstack /opt/stack
-    sudo chown -R stack.stack /opt/stack/devstack
-
-This will clone the current devstack code locally, then setup the "stack"
-account that devstack services will run under. Finally, it will move devstack
-into its default location in /opt/stack/devstack.
-
-Edit your ``/opt/stack/devstack/local.conf`` to look like
-
-::
-
-    [[local|localrc]]
-    enable_plugin octavia https://opendev.org/openstack/octavia
-    # If you are enabling horizon, include the octavia dashboard
-    # enable_plugin octavia-dashboard https://opendev.org/openstack/octavia-dashboard.git
-    # If you are enabling barbican for TLS offload in Octavia, include it here.
-    # enable_plugin barbican https://opendev.org/openstack/barbican
-
-    # ===== BEGIN localrc =====
-    DATABASE_PASSWORD=password
-    ADMIN_PASSWORD=password
-    SERVICE_PASSWORD=password
-    SERVICE_TOKEN=password
-    RABBIT_PASSWORD=password
-    # Enable Logging
-    LOGFILE=$DEST/logs/stack.sh.log
-    VERBOSE=True
-    LOG_COLOR=True
-    # Pre-requisite
-    ENABLED_SERVICES=rabbit,mysql,key
-    # Horizon - enable for the OpenStack web GUI
-    # ENABLED_SERVICES+=,horizon
-    # Nova
-    ENABLED_SERVICES+=,n-api,n-crt,n-cpu,n-cond,n-sch,n-api-meta,n-sproxy
-    ENABLED_SERVICES+=,placement-api,placement-client
-    # Glance
-    ENABLED_SERVICES+=,g-api
-    # Neutron
-    ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta,neutron
-    ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api
-    # Cinder
-    ENABLED_SERVICES+=,c-api,c-vol,c-sch
-    # Tempest
-    ENABLED_SERVICES+=,tempest
-    # Barbican - Optionally used for TLS offload in Octavia
-    # ENABLED_SERVICES+=,barbican
-    # ===== END localrc =====
-
-Run stack.sh and do some sanity checks
-
-::
-
-    sudo su - stack
-    cd /opt/stack/devstack
-    ./stack.sh
-    . ./openrc
-
-    openstack network list  # should show public and private networks
-
-Create two nova instances that we can use as test http servers:
-
-::
-
-    #create nova instances on private network
-    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
-    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
-    openstack server list # should show the nova instances just created
-
-    #add secgroup rules to allow ssh etc..
-    openstack security group rule create default --protocol icmp
-    openstack security group rule create default --protocol tcp --dst-port 22:22
-    openstack security group rule create default --protocol tcp --dst-port 80:80
-
-Set up a simple web server on each of these instances. ssh into each instance (username 'cirros', password 'cubswin:)' or 'gocubsgo') and run
-
-::
-
-    MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}')
-    while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done&
-
-Phase 2: Create your load balancer
-----------------------------------
-
-Make sure you have the 'openstack loadbalancer' commands:
-
-::
-
-    pip install python-octaviaclient
-
-Create your load balancer:
-
-::
-
-    openstack loadbalancer create --name lb1 --vip-subnet-id private-subnet
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer listener create --protocol HTTP --protocol-port 80 --name listener1 lb1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer pool create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer healthmonitor create --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer member create --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
-    openstack loadbalancer show lb1  # Wait for the provisioning_status to be ACTIVE.
-    openstack loadbalancer member create --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
-
-Please note: The <web server # address> fields are the IP addresses of the nova
-servers created in Phase 1.
-Also note, using the API directly you can do all of the above commands in one
-API call.
-
-Phase 3: Test your load balancer
---------------------------------
-
-::
-
-    openstack loadbalancer show lb1 # Note the vip_address
-    curl http://<vip_address>
-    curl http://<vip_address>
-
-This should show the "Welcome to <IP>" message from each member server.
diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst
index 3732f06..ba483e9 100644
--- a/doc/source/guides/devstack-with-nested-kvm.rst
+++ b/doc/source/guides/devstack-with-nested-kvm.rst
@@ -1,3 +1,5 @@
+.. _kvm_nested_virt:
+
 =======================================================
 Configure DevStack with KVM-based Nested Virtualization
 =======================================================
diff --git a/doc/source/guides/devstack-with-octavia.rst b/doc/source/guides/devstack-with-octavia.rst
new file mode 100644
index 0000000..55939f0
--- /dev/null
+++ b/doc/source/guides/devstack-with-octavia.rst
@@ -0,0 +1,144 @@
+Devstack with Octavia Load Balancing
+====================================
+
+Starting with the OpenStack Pike release, Octavia is now a standalone service
+providing load balancing services for OpenStack.
+
+This guide will show you how to create a devstack with `Octavia API`_ enabled.
+
+.. _Octavia API: https://docs.openstack.org/api-ref/load-balancer/v2/index.html
+
+Phase 1: Create DevStack + 2 nova instances
+--------------------------------------------
+
+First, set up a VM of your choice with at least 8 GB RAM and 16 GB disk space,
+and make sure it is updated. Install git and any other developer tools you find
+useful.
+
+Install devstack::
+
+    git clone https://opendev.org/openstack/devstack
+    cd devstack/tools
+    sudo ./create-stack-user.sh
+    cd ../..
+    sudo mv devstack /opt/stack
+    sudo chown -R stack.stack /opt/stack/devstack
+
+This will clone the current devstack code locally, then set up the "stack"
+account that devstack services will run under. Finally, it will move devstack
+into its default location, /opt/stack/devstack.
+
+Edit your ``/opt/stack/devstack/local.conf`` to look like::
+
+    [[local|localrc]]
+    # ===== BEGIN localrc =====
+    DATABASE_PASSWORD=password
+    ADMIN_PASSWORD=password
+    SERVICE_PASSWORD=password
+    SERVICE_TOKEN=password
+    RABBIT_PASSWORD=password
+    GIT_BASE=https://opendev.org
+    # Optional settings:
+    # OCTAVIA_AMP_BASE_OS=centos
+    # OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID=9-stream
+    # OCTAVIA_AMP_IMAGE_SIZE=3
+    # OCTAVIA_LB_TOPOLOGY=ACTIVE_STANDBY
+    # OCTAVIA_ENABLE_AMPHORAV2_JOBBOARD=True
+    # LIBS_FROM_GIT+=octavia-lib,
+    # Enable Logging
+    LOGFILE=$DEST/logs/stack.sh.log
+    VERBOSE=True
+    LOG_COLOR=True
+    enable_service rabbit
+    enable_plugin neutron $GIT_BASE/openstack/neutron
+    # Octavia supports using QoS policies on the VIP port:
+    enable_service q-qos
+    enable_service placement-api placement-client
+    # Octavia services
+    enable_plugin octavia $GIT_BASE/openstack/octavia master
+    enable_plugin octavia-dashboard $GIT_BASE/openstack/octavia-dashboard
+    enable_plugin ovn-octavia-provider $GIT_BASE/openstack/ovn-octavia-provider
+    enable_plugin octavia-tempest-plugin $GIT_BASE/openstack/octavia-tempest-plugin
+    enable_service octavia o-api o-cw o-hm o-hk o-da
+    # If you are enabling barbican for TLS offload in Octavia, include it here.
+    # enable_plugin barbican $GIT_BASE/openstack/barbican
+    # enable_service barbican
+    # Cinder (optional)
+    disable_service c-api c-vol c-sch
+    # Tempest
+    enable_service tempest
+    # ===== END localrc =====
+
+.. note::
+    For best performance it is highly recommended to use KVM
+    virtualization instead of QEMU.
+    Also make sure nested virtualization is enabled as documented in
+    :ref:`the respective guide <kvm_nested_virt>`.
+    By adding ``LIBVIRT_CPU_MODE="host-passthrough"`` to your
+    ``local.conf`` you enable the guest VMs to make use of all features your
+    host's CPU provides.
+
+Run stack.sh and do some sanity checks::
+
+    sudo su - stack
+    cd /opt/stack/devstack
+    ./stack.sh
+    . ./openrc
+
+    openstack network list  # should show public and private networks
+
+Create two nova instances that we can use as test http servers::
+
+    # create nova instances on private network
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node1
+    openstack server create --image $(openstack image list | awk '/ cirros-.*-x86_64-.* / {print $2}') --flavor 1 --nic net-id=$(openstack network list | awk '/ private / {print $2}') node2
+    openstack server list # should show the nova instances just created
+
+    # add secgroup rules to allow ssh etc..
+    openstack security group rule create default --protocol icmp
+    openstack security group rule create default --protocol tcp --dst-port 22:22
+    openstack security group rule create default --protocol tcp --dst-port 80:80
+
+Set up a simple web server on each of these instances. One possibility is to
+use the `Golang test server`_ that is also used by the Octavia project for CI
+testing.
+Copy the binary to your instances and start it as shown below
+(username 'cirros', password 'gocubsgo')::
+
+    INST_IP=<instance IP>
+    scp -O test_server.bin cirros@${INST_IP}:
+    ssh -f cirros@${INST_IP} ./test_server.bin -id ${INST_IP}
+
+When started this way the test server will respond to HTTP requests with
+its own IP.
+
+Phase 2: Create your load balancer
+----------------------------------
+
+Create your load balancer::
+
+    openstack loadbalancer create --wait --name lb1 --vip-subnet-id private-subnet
+    openstack loadbalancer listener create --wait --protocol HTTP --protocol-port 80 --name listener1 lb1
+    openstack loadbalancer pool create --wait --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1
+    openstack loadbalancer healthmonitor create --wait --delay 5 --timeout 2 --max-retries 1 --type HTTP pool1
+    openstack loadbalancer member create --wait --subnet-id private-subnet --address <web server 1 address> --protocol-port 80 pool1
+    openstack loadbalancer member create --wait --subnet-id private-subnet --address <web server 2 address> --protocol-port 80 pool1
+
+Please note: the <web server # address> fields are the IP addresses of the nova
+servers created in Phase 1.
+Also note that, when using the API directly, all of the above commands can be
+done in a single API call.
+
+Phase 3: Test your load balancer
+--------------------------------
+
+::
+
+    openstack loadbalancer show lb1 # Note the vip_address
+    curl http://<vip_address>
+    curl http://<vip_address>
+
+This should show the "Welcome to <IP>" message from each member server.
+
+
+.. _Golang test server: https://opendev.org/openstack/octavia-tempest-plugin/src/branch/master/octavia_tempest_plugin/contrib/test_server
diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst
index 81c5945..658422b 100644
--- a/doc/source/guides/multinode-lab.rst
+++ b/doc/source/guides/multinode-lab.rst
@@ -75,13 +75,21 @@
 
     useradd -s /bin/bash -d /opt/stack -m stack
 
+Ensure the home directory of the ``stack`` user has executable permission for
+all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+ with
+``750``, which can cause issues during deployment.
+
+::
+
+    chmod +x /opt/stack
+
 This user will be making many changes to your system during installation
 and operation so it needs to have sudo privileges to root without a
 password:
 
 ::
 
-    echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+    echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
 
 From here on use the ``stack`` user. **Logout** and **login** as the
 ``stack`` user.
diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst
index 5b42797..705d427 100644
--- a/doc/source/guides/nova.rst
+++ b/doc/source/guides/nova.rst
@@ -122,7 +122,7 @@
 .. code-block:: shell
 
   $ openstack --os-compute-api-version 2.37 server create --flavor cirros256 \
-      --image cirros-0.3.5-x86_64-disk --nic none --wait test-server
+      --image cirros-0.6.2-x86_64-disk --nic none --wait test-server
 
 .. note:: ``--os-compute-api-version`` greater than or equal to 2.37 is
           required to use ``--nic=none``.
diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst
index a0e97ed..a4385b5 100644
--- a/doc/source/guides/single-machine.rst
+++ b/doc/source/guides/single-machine.rst
@@ -49,13 +49,21 @@
 
     $ sudo useradd -s /bin/bash -d /opt/stack -m stack
 
+Ensure the home directory of the ``stack`` user has executable permission for
+all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+ with
+``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+    $ sudo chmod +x /opt/stack
+
 Since this user will be making many changes to your system, it will need
 to have sudo privileges:
 
 .. code-block:: console
 
     $ apt-get install sudo -y || yum install -y sudo
-    $ echo "stack ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
+    $ echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
 
 .. note:: On some systems you may need to use ``sudo visudo``.
 
@@ -98,6 +106,9 @@
 -  Set the service password. This is used by the OpenStack services
    (Nova, Glance, etc) to authenticate with Keystone.
 
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
+
 ``local.conf`` should look something like this:
 
 .. code-block:: ini
diff --git a/doc/source/index.rst b/doc/source/index.rst
index feb50ce..a5a11e2 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -37,10 +37,10 @@
 -------------
 
 Start with a clean and minimal install of a Linux system. DevStack
-attempts to support the two latest LTS releases of Ubuntu, the
-latest/current Fedora version, CentOS/RHEL 8, OpenSUSE and openEuler.
+attempts to support the two latest LTS releases of Ubuntu,
+Rocky Linux 9 and openEuler.
 
-If you do not have a preference, Ubuntu 20.04 (Focal Fossa) is the
+If you do not have a preference, Ubuntu 22.04 (Jammy) is the
 most tested, and will probably go the smoothest.
 
 Add Stack User (optional)
@@ -57,6 +57,14 @@
 
    $ sudo useradd -s /bin/bash -d /opt/stack -m stack
 
+Ensure the home directory of the ``stack`` user has executable permission for
+all, as RHEL-based distros create it with ``700`` and Ubuntu 21.04+ with
+``750``, which can cause issues during deployment.
+
+.. code-block:: console
+
+    $ sudo chmod +x /opt/stack
+
 Since this user will be making many changes to your system, it should
 have sudo privileges:
 
@@ -93,7 +101,10 @@
 This is the minimum required config to get started with DevStack.
 
 .. note:: There is a sample :download:`local.conf </assets/local.conf>` file
-    under the *samples* directory in the devstack repository.
+   under the *samples* directory in the devstack repository.
+
+.. warning:: Only use alphanumeric characters in your passwords, as some
+   services fail to work when using special characters.
 
 Start the install
 -----------------
@@ -102,7 +113,7 @@
 
    $ ./stack.sh
 
-This will take a 15 - 20 minutes, largely depending on the speed of
+This will take 15 - 30 minutes, largely depending on the speed of
 your internet connection. Many git trees and packages will be
 installed during this process.
 
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index a609333..4384081 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -23,13 +23,12 @@
 release.*
 
 -  Ubuntu: current LTS release plus current development release
--  Fedora: current release plus previous release
--  RHEL/CentOS: current major release
+-  RHEL/CentOS/Rocky Linux: current major release
 -  Other OS platforms may continue to be included but the maintenance of
    those platforms shall not be assumed simply due to their presence.
    Having a listed point-of-contact for each additional OS will greatly
    increase its chance of being well-maintained.
--  Patches for Ubuntu and/or Fedora will not be held up due to
+-  Patches for Ubuntu and/or Rocky Linux will not be held up due to
    side-effects on other OS platforms.
 
 Databases
diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst
index 2e8e8f5..21cf52c 100644
--- a/doc/source/plugin-registry.rst
+++ b/doc/source/plugin-registry.rst
@@ -28,8 +28,6 @@
 openstack/barbican                       `https://opendev.org/openstack/barbican <https://opendev.org/openstack/barbican>`__
 openstack/blazar                         `https://opendev.org/openstack/blazar <https://opendev.org/openstack/blazar>`__
 openstack/ceilometer                     `https://opendev.org/openstack/ceilometer <https://opendev.org/openstack/ceilometer>`__
-openstack/ceilometer-powervm             `https://opendev.org/openstack/ceilometer-powervm <https://opendev.org/openstack/ceilometer-powervm>`__
-openstack/cinderlib                      `https://opendev.org/openstack/cinderlib <https://opendev.org/openstack/cinderlib>`__
 openstack/cloudkitty                     `https://opendev.org/openstack/cloudkitty <https://opendev.org/openstack/cloudkitty>`__
 openstack/cyborg                         `https://opendev.org/openstack/cyborg <https://opendev.org/openstack/cyborg>`__
 openstack/designate                      `https://opendev.org/openstack/designate <https://opendev.org/openstack/designate>`__
@@ -39,7 +37,6 @@
 openstack/devstack-plugin-kafka          `https://opendev.org/openstack/devstack-plugin-kafka <https://opendev.org/openstack/devstack-plugin-kafka>`__
 openstack/devstack-plugin-nfs            `https://opendev.org/openstack/devstack-plugin-nfs <https://opendev.org/openstack/devstack-plugin-nfs>`__
 openstack/devstack-plugin-open-cas       `https://opendev.org/openstack/devstack-plugin-open-cas <https://opendev.org/openstack/devstack-plugin-open-cas>`__
-openstack/ec2-api                        `https://opendev.org/openstack/ec2-api <https://opendev.org/openstack/ec2-api>`__
 openstack/freezer                        `https://opendev.org/openstack/freezer <https://opendev.org/openstack/freezer>`__
 openstack/freezer-api                    `https://opendev.org/openstack/freezer-api <https://opendev.org/openstack/freezer-api>`__
 openstack/freezer-tempest-plugin         `https://opendev.org/openstack/freezer-tempest-plugin <https://opendev.org/openstack/freezer-tempest-plugin>`__
@@ -64,14 +61,10 @@
 openstack/monasca-api                    `https://opendev.org/openstack/monasca-api <https://opendev.org/openstack/monasca-api>`__
 openstack/monasca-events-api             `https://opendev.org/openstack/monasca-events-api <https://opendev.org/openstack/monasca-events-api>`__
 openstack/monasca-tempest-plugin         `https://opendev.org/openstack/monasca-tempest-plugin <https://opendev.org/openstack/monasca-tempest-plugin>`__
-openstack/murano                         `https://opendev.org/openstack/murano <https://opendev.org/openstack/murano>`__
 openstack/networking-bagpipe             `https://opendev.org/openstack/networking-bagpipe <https://opendev.org/openstack/networking-bagpipe>`__
 openstack/networking-baremetal           `https://opendev.org/openstack/networking-baremetal <https://opendev.org/openstack/networking-baremetal>`__
 openstack/networking-bgpvpn              `https://opendev.org/openstack/networking-bgpvpn <https://opendev.org/openstack/networking-bgpvpn>`__
 openstack/networking-generic-switch      `https://opendev.org/openstack/networking-generic-switch <https://opendev.org/openstack/networking-generic-switch>`__
-openstack/networking-hyperv              `https://opendev.org/openstack/networking-hyperv <https://opendev.org/openstack/networking-hyperv>`__
-openstack/networking-odl                 `https://opendev.org/openstack/networking-odl <https://opendev.org/openstack/networking-odl>`__
-openstack/networking-powervm             `https://opendev.org/openstack/networking-powervm <https://opendev.org/openstack/networking-powervm>`__
 openstack/networking-sfc                 `https://opendev.org/openstack/networking-sfc <https://opendev.org/openstack/networking-sfc>`__
 openstack/neutron                        `https://opendev.org/openstack/neutron <https://opendev.org/openstack/neutron>`__
 openstack/neutron-dynamic-routing        `https://opendev.org/openstack/neutron-dynamic-routing <https://opendev.org/openstack/neutron-dynamic-routing>`__
@@ -80,22 +73,17 @@
 openstack/neutron-tempest-plugin         `https://opendev.org/openstack/neutron-tempest-plugin <https://opendev.org/openstack/neutron-tempest-plugin>`__
 openstack/neutron-vpnaas                 `https://opendev.org/openstack/neutron-vpnaas <https://opendev.org/openstack/neutron-vpnaas>`__
 openstack/neutron-vpnaas-dashboard       `https://opendev.org/openstack/neutron-vpnaas-dashboard <https://opendev.org/openstack/neutron-vpnaas-dashboard>`__
-openstack/nova-powervm                   `https://opendev.org/openstack/nova-powervm <https://opendev.org/openstack/nova-powervm>`__
+openstack/nova                           `https://opendev.org/openstack/nova <https://opendev.org/openstack/nova>`__
 openstack/octavia                        `https://opendev.org/openstack/octavia <https://opendev.org/openstack/octavia>`__
 openstack/octavia-dashboard              `https://opendev.org/openstack/octavia-dashboard <https://opendev.org/openstack/octavia-dashboard>`__
 openstack/octavia-tempest-plugin         `https://opendev.org/openstack/octavia-tempest-plugin <https://opendev.org/openstack/octavia-tempest-plugin>`__
 openstack/openstacksdk                   `https://opendev.org/openstack/openstacksdk <https://opendev.org/openstack/openstacksdk>`__
 openstack/osprofiler                     `https://opendev.org/openstack/osprofiler <https://opendev.org/openstack/osprofiler>`__
-openstack/oswin-tempest-plugin           `https://opendev.org/openstack/oswin-tempest-plugin <https://opendev.org/openstack/oswin-tempest-plugin>`__
+openstack/ovn-bgp-agent                  `https://opendev.org/openstack/ovn-bgp-agent <https://opendev.org/openstack/ovn-bgp-agent>`__
 openstack/ovn-octavia-provider           `https://opendev.org/openstack/ovn-octavia-provider <https://opendev.org/openstack/ovn-octavia-provider>`__
-openstack/patrole                        `https://opendev.org/openstack/patrole <https://opendev.org/openstack/patrole>`__
 openstack/rally-openstack                `https://opendev.org/openstack/rally-openstack <https://opendev.org/openstack/rally-openstack>`__
-openstack/sahara                         `https://opendev.org/openstack/sahara <https://opendev.org/openstack/sahara>`__
-openstack/sahara-dashboard               `https://opendev.org/openstack/sahara-dashboard <https://opendev.org/openstack/sahara-dashboard>`__
-openstack/senlin                         `https://opendev.org/openstack/senlin <https://opendev.org/openstack/senlin>`__
 openstack/shade                          `https://opendev.org/openstack/shade <https://opendev.org/openstack/shade>`__
 openstack/skyline-apiserver              `https://opendev.org/openstack/skyline-apiserver <https://opendev.org/openstack/skyline-apiserver>`__
-openstack/solum                          `https://opendev.org/openstack/solum <https://opendev.org/openstack/solum>`__
 openstack/storlets                       `https://opendev.org/openstack/storlets <https://opendev.org/openstack/storlets>`__
 openstack/tacker                         `https://opendev.org/openstack/tacker <https://opendev.org/openstack/tacker>`__
 openstack/tap-as-a-service               `https://opendev.org/openstack/tap-as-a-service <https://opendev.org/openstack/tap-as-a-service>`__
@@ -188,6 +176,7 @@
 x/valet                                  `https://opendev.org/x/valet <https://opendev.org/x/valet>`__
 x/vmware-nsx                             `https://opendev.org/x/vmware-nsx <https://opendev.org/x/vmware-nsx>`__
 x/vmware-vspc                            `https://opendev.org/x/vmware-vspc <https://opendev.org/x/vmware-vspc>`__
+x/whitebox-neutron-tempest-plugin        `https://opendev.org/x/whitebox-neutron-tempest-plugin <https://opendev.org/x/whitebox-neutron-tempest-plugin>`__
 ======================================== ===
 
 
diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst
index 7d70d74..dd75b5a 100644
--- a/doc/source/plugins.rst
+++ b/doc/source/plugins.rst
@@ -238,14 +238,11 @@
 locations in the top-level of the plugin repository:
 
 - ``./devstack/files/debs/$plugin_name`` - Packages to install when running
-  on Ubuntu, Debian or Linux Mint.
+  on Ubuntu or Debian.
 
 - ``./devstack/files/rpms/$plugin_name`` - Packages to install when running
   on Red Hat, Fedora, or CentOS.
 
-- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when
-  running on SUSE Linux or openSUSE.
-
 Although there are no plans to remove this method of installing
 packages, plugins should consider it deprecated in favor of the ``bindep``
 support described below.
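 
 For example (an illustrative file, not one shipped in this tree), a plugin
 named ``foo`` that needs the ``jq`` package on Ubuntu would provide a
 ``./devstack/files/debs/foo`` file listing one package per line::
 
     jq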
diff --git a/doc/source/tempest.rst b/doc/source/tempest.rst
new file mode 100644
index 0000000..65dd5b1
--- /dev/null
+++ b/doc/source/tempest.rst
@@ -0,0 +1,25 @@
+=======
+Tempest
+=======
+
+`Tempest`_ is the OpenStack Integration test suite. It is installed by default
+and is used to provide integration testing for many of the OpenStack services.
+Just like DevStack itself, it is possible to extend Tempest with plugins. In
+fact, many Tempest plugin packages also include a DevStack plugin to do things
+like pre-creating required static resources.
+
+The `Tempest documentation <Tempest_>`_ provides a thorough guide to using
+Tempest. However, if you simply wish to run the standard set of Tempest tests
+against an existing deployment, you can do the following:
+
+.. code-block:: shell
+
+    cd /opt/stack/tempest
+    /opt/stack/data/venv/bin/tempest run ...
+
+The above assumes you have installed DevStack in the default location
+(configured via the ``DEST`` configuration variable) and have enabled
+virtualenv-based installation in the standard location (configured via the
+``USE_VENV`` and ``VENV_DEST`` configuration variables, respectively).
+
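+A narrower run is often more useful. For instance (an illustrative invocation;
+the regex and concurrency values are only examples), you can restrict the run
+to the identity API tests and cap the number of workers:
+
+.. code-block:: shell
+
+    cd /opt/stack/tempest
+    /opt/stack/data/venv/bin/tempest run --regex tempest.api.identity --concurrency 2
+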
+.. _Tempest: https://docs.openstack.org/tempest/latest/
diff --git a/files/apache-horizon.template b/files/apache-horizon.template
index efcfc03..da7a7d2 100644
--- a/files/apache-horizon.template
+++ b/files/apache-horizon.template
@@ -39,4 +39,5 @@
     CustomLog /var/log/%APACHE_NAME%/horizon_access.log combined
 </VirtualHost>
 
+%WSGIPYTHONHOME%
 WSGISocketPrefix /var/run/%APACHE_NAME%
diff --git a/files/apache-keystone.template b/files/apache-keystone.template
index 1a353e5..d99e8e6 100644
--- a/files/apache-keystone.template
+++ b/files/apache-keystone.template
@@ -23,6 +23,7 @@
 %SSLLISTEN%    %SSLENGINE%
 %SSLLISTEN%    %SSLCERTFILE%
 %SSLLISTEN%    %SSLKEYFILE%
+%SSLLISTEN%    SSLProtocol -all +TLSv1.3 +TLSv1.2
 %SSLLISTEN%</VirtualHost>
 
 Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public
diff --git a/files/apache-neutron.template b/files/apache-neutron.template
index c7796b9..358e87f 100644
--- a/files/apache-neutron.template
+++ b/files/apache-neutron.template
@@ -24,6 +24,7 @@
 %SSLLISTEN%    %SSLENGINE%
 %SSLLISTEN%    %SSLCERTFILE%
 %SSLLISTEN%    %SSLKEYFILE%
+%SSLLISTEN%    SSLProtocol -all +TLSv1.3 +TLSv1.2
 %SSLLISTEN%</VirtualHost>
 
 Alias /networking %NEUTRON_BIN%/neutron-api
diff --git a/files/debs/nova b/files/debs/nova
index 0194f00..5c00ad7 100644
--- a/files/debs/nova
+++ b/files/debs/nova
@@ -1,7 +1,5 @@
 conntrack
 curl
-dnsmasq-base
-dnsmasq-utils # for dhcp_release
 ebtables
 genisoimage # required for config_drive
 iptables
diff --git a/files/dnsmasq-for-baremetal-from-nova-network.conf b/files/dnsmasq-for-baremetal-from-nova-network.conf
deleted file mode 100644
index 66a3751..0000000
--- a/files/dnsmasq-for-baremetal-from-nova-network.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-enable-tftp
-tftp-root=/tftpboot
-dhcp-boot=pxelinux.0
diff --git a/files/openstack-cli-server/openstack b/files/openstack-cli-server/openstack
new file mode 100755
index 0000000..47fbfc5
--- /dev/null
+++ b/files/openstack-cli-server/openstack
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+import os
+import os.path
+import json
+
+server_address = "/tmp/openstack.sock"
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+
+try:
+    sock.connect(server_address)
+except socket.error as msg:
+    print(msg, file=sys.stderr)
+    sys.exit(1)
+
+
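+# Wire framing used by send()/recv() below: each message is the JSON payload's
+# length in ASCII digits, a newline, then the JSON document itself.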
+def send(sock, doc):
+    jdoc = json.dumps(doc)
+    sock.send(b'%d\n' % len(jdoc))
+    sock.sendall(jdoc.encode('utf-8'))
+
+def recv(sock):
+    length_str = b''
+
+    char = sock.recv(1)
+    if len(char) == 0:
+        print("Unexpected end of file", file=sys.stderr)
+        sys.exit(1)
+
+    while char != b'\n':
+        length_str += char
+        char = sock.recv(1)
+        if len(char) == 0:
+            print("Unexpected end of file", file=sys.stderr)
+            sys.exit(1)
+
+    total = int(length_str)
+
+    # use a memoryview to receive the data chunk by chunk efficiently
+    jdoc = memoryview(bytearray(total))
+    next_offset = 0
+    while total - next_offset > 0:
+        recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset)
+        next_offset += recv_size
+    try:
+        doc = json.loads(jdoc.tobytes())
+    except (TypeError, ValueError) as e:
+        raise Exception('Data received was not in JSON format')
+    return doc
+
+try:
+    env = {}
+    passenv = ["CINDER_VERSION",
+               "OS_AUTH_URL",
+               "OS_NO_CACHE",
+               "OS_PASSWORD",
+               "OS_PROJECT_NAME",
+               "OS_REGION_NAME",
+               "OS_TENANT_NAME",
+               "OS_USERNAME",
+               "OS_VOLUME_API_VERSION",
+               "OS_CLOUD"]
+    for name in passenv:
+        if name in os.environ:
+            env[name] = os.environ[name]
+
+    cmd = {
+        "app": os.path.basename(sys.argv[0]),
+        "env": env,
+        "argv": sys.argv[1:]
+    }
+    try:
+        image_idx = sys.argv.index('image')
+        create_idx = sys.argv.index('create')
+        missing_file = image_idx < create_idx and \
+                not any(x.startswith('--file') for x in sys.argv)
+    except ValueError:
+        missing_file = False
+
+    if missing_file:
+        # This means we were called with an image create command, but were
+        # not provided a --file option. That likely means we're being passed
+        # the image data to stdin, which won't work because we do not proxy
+        # stdin to the server. So, we just reject the operation and ask the
+        # caller to provide the file with --file instead.
+        # We've already connected to the server, we need to send it some dummy
+        # data so it doesn't wait forever.
+        send(sock, {})
+        print('Image create without --file is not allowed in server mode',
+              file=sys.stderr)
+        sys.exit(1)
+    else:
+        send(sock, cmd)
+
+    doc = recv(sock)
+    if doc["stdout"] != b'':
+        print(doc["stdout"], end='')
+    if doc["stderr"] != b'':
+        print(doc["stderr"], file=sys.stderr)
+    sys.exit(doc["status"])
+finally:
+    sock.close()
diff --git a/files/openstack-cli-server/openstack-cli-server b/files/openstack-cli-server/openstack-cli-server
new file mode 100755
index 0000000..f3d2747
--- /dev/null
+++ b/files/openstack-cli-server/openstack-cli-server
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+import sys
+import os
+import json
+
+from openstackclient import shell as osc_shell
+from io import StringIO
+
+server_address = "/tmp/openstack.sock"
+
+try:
+    os.unlink(server_address)
+except OSError:
+    if os.path.exists(server_address):
+        raise
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+print('starting up on %s' % server_address, file=sys.stderr)
+sock.bind(server_address)
+
+# Listen for incoming connections
+sock.listen(1)
+
+def send(sock, doc):
+    jdoc = json.dumps(doc)
+    sock.send(b'%d\n' % len(jdoc))
+    sock.sendall(jdoc.encode('utf-8'))
+
+def recv(sock):
+    length_str = b''
+    char = sock.recv(1)
+    while char != b'\n':
+        length_str += char
+        char = sock.recv(1)
+
+    total = int(length_str)
+
+    # use a memoryview to receive the data chunk by chunk efficiently
+    jdoc = memoryview(bytearray(total))
+    next_offset = 0
+    while total - next_offset > 0:
+        recv_size = sock.recv_into(jdoc[next_offset:], total - next_offset)
+        next_offset += recv_size
+    try:
+        doc = json.loads(jdoc.tobytes())
+    except (TypeError, ValueError) as e:
+        raise Exception('Data received was not in JSON format')
+    return doc
+
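+# Serve one client at a time: each request is run in-process through
+# OpenStackShell with stdout/stderr captured and returned over the socket.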
+while True:
+    csock, client_address = sock.accept()
+    try:
+        doc = recv(csock)
+
+        print("%s %s" % (doc["app"], doc["argv"]), file=sys.stderr)
+        oldenv = {}
+        for name in doc["env"].keys():
+            oldenv[name] = os.environ.get(name, None)
+            os.environ[name] = doc["env"][name]
+
+        try:
+            old_stdout = sys.stdout
+            old_stderr = sys.stderr
+            my_stdout = sys.stdout = StringIO()
+            my_stderr = sys.stderr = StringIO()
+
+            class Exit(BaseException):
+                def __init__(self, status):
+                    self.status = status
+
+            def noexit(stat):
+                raise Exit(stat)
+
+            sys.exit = noexit
+
+            if doc["app"] == "openstack":
+                sh = osc_shell.OpenStackShell()
+                ret = sh.run(doc["argv"])
+            else:
+                print("Unknown application %s" % doc["app"], file=sys.stderr)
+                ret = 1
+        except Exit as e:
+            ret = e.status
+        finally:
+            sys.stdout = old_stdout
+            sys.stderr = old_stderr
+
+            for name in oldenv.keys():
+                if oldenv[name] is None:
+                    del os.environ[name]
+                else:
+                    os.environ[name] = oldenv[name]
+
+        send(csock, {
+            "stdout": my_stdout.getvalue(),
+            "stderr": my_stderr.getvalue(),
+            "status": ret,
+        })
+
+    except BaseException as e:
+        print(e, file=sys.stderr)
+    finally:
+        csock.close()
diff --git a/files/rpms-suse/baremetal b/files/rpms-suse/baremetal
deleted file mode 100644
index 61f73ee..0000000
--- a/files/rpms-suse/baremetal
+++ /dev/null
@@ -1 +0,0 @@
-dnsmasq
diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph
deleted file mode 100644
index 8c4955d..0000000
--- a/files/rpms-suse/ceph
+++ /dev/null
@@ -1,3 +0,0 @@
-ceph    # NOPRIME
-lsb
-xfsprogs
diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder
deleted file mode 100644
index b39cc79..0000000
--- a/files/rpms-suse/cinder
+++ /dev/null
@@ -1,3 +0,0 @@
-lvm2
-qemu-tools
-tgt # NOPRIME
diff --git a/files/rpms-suse/dstat b/files/rpms-suse/dstat
deleted file mode 100644
index 2b643b8..0000000
--- a/files/rpms-suse/dstat
+++ /dev/null
@@ -1 +0,0 @@
-dstat
diff --git a/files/rpms-suse/general b/files/rpms-suse/general
deleted file mode 100644
index f636110..0000000
--- a/files/rpms-suse/general
+++ /dev/null
@@ -1,34 +0,0 @@
-apache2
-apache2-devel
-bc
-ca-certificates-mozilla
-curl
-gawk
-gcc
-gcc-c++
-git-core
-graphviz # docs
-iputils
-libffi-devel  # pyOpenSSL
-libjpeg8-devel # Pillow 3.0.0
-libopenssl-devel # to rebuild pyOpenSSL if needed
-libxslt-devel  # lxml
-lsof # useful when debugging
-make
-net-tools
-openssh
-openssl
-pcre-devel # python-pcre
-postgresql-devel  # psycopg2
-psmisc
-python3-systemd
-python-cmd2 # dist:opensuse-12.3
-python-devel  # pyOpenSSL
-python-xml
-tar
-tcpdump
-unzip
-util-linux
-wget
-which
-zlib-devel
diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon
deleted file mode 100644
index 753ea76..0000000
--- a/files/rpms-suse/horizon
+++ /dev/null
@@ -1,2 +0,0 @@
-apache2-mod_wsgi  # NOPRIME
-apache2  # NOPRIME
diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone
deleted file mode 100644
index 66cfc23..0000000
--- a/files/rpms-suse/keystone
+++ /dev/null
@@ -1,4 +0,0 @@
-cyrus-sasl-devel
-memcached
-openldap2-devel
-sqlite3
diff --git a/files/rpms-suse/ldap b/files/rpms-suse/ldap
deleted file mode 100644
index 46d26f0..0000000
--- a/files/rpms-suse/ldap
+++ /dev/null
@@ -1,3 +0,0 @@
-openldap2
-openldap2-client
-python-ldap
diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api
deleted file mode 100644
index 0f08daa..0000000
--- a/files/rpms-suse/n-api
+++ /dev/null
@@ -1 +0,0 @@
-python-dateutil
diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu
deleted file mode 100644
index 9c724cb..0000000
--- a/files/rpms-suse/n-cpu
+++ /dev/null
@@ -1,10 +0,0 @@
-cdrkit-cdrtools-compat # dist:sle12
-cryptsetup
-dosfstools
-libosinfo
-lvm2
-mkisofs # not:sle12
-open-iscsi
-sg3_utils
-# Stuff for diablo volumes
-sysfsutils
diff --git a/files/rpms-suse/neutron-agent b/files/rpms-suse/neutron-agent
deleted file mode 100644
index ea8819e..0000000
--- a/files/rpms-suse/neutron-agent
+++ /dev/null
@@ -1 +0,0 @@
-ipset
diff --git a/files/rpms-suse/neutron-common b/files/rpms-suse/neutron-common
deleted file mode 100644
index e3799a9..0000000
--- a/files/rpms-suse/neutron-common
+++ /dev/null
@@ -1,12 +0,0 @@
-acl
-dnsmasq
-dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
-ebtables
-haproxy # to serve as metadata proxy inside router/dhcp namespaces
-iptables
-iputils
-rabbitmq-server # NOPRIME
-radvd # NOPRIME
-sqlite3
-sudo
-vlan
diff --git a/files/rpms-suse/neutron-l3 b/files/rpms-suse/neutron-l3
deleted file mode 100644
index a7a190c..0000000
--- a/files/rpms-suse/neutron-l3
+++ /dev/null
@@ -1,2 +0,0 @@
-conntrack-tools
-keepalived
diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova
deleted file mode 100644
index 1cc2f62..0000000
--- a/files/rpms-suse/nova
+++ /dev/null
@@ -1,23 +0,0 @@
-cdrkit-cdrtools-compat # dist:sle12
-conntrack-tools
-curl
-dnsmasq
-dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1
-ebtables
-iptables
-iputils
-kpartx
-kvm # NOPRIME
-libvirt # NOPRIME
-libvirt-python # NOPRIME
-# mkisofs is required for config_drive
-mkisofs # not:sle12
-parted
-polkit
-# qemu as fallback if kvm cannot be used
-qemu # NOPRIME
-rabbitmq-server # NOPRIME
-socat
-sqlite3
-sudo
-vlan
diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch
deleted file mode 100644
index 53f8bb2..0000000
--- a/files/rpms-suse/openvswitch
+++ /dev/null
@@ -1,3 +0,0 @@
-
-openvswitch
-openvswitch-switch
diff --git a/files/rpms-suse/os-brick b/files/rpms-suse/os-brick
deleted file mode 100644
index 67b33a9..0000000
--- a/files/rpms-suse/os-brick
+++ /dev/null
@@ -1,2 +0,0 @@
-lsscsi
-open-iscsi
diff --git a/files/rpms-suse/q-agt b/files/rpms-suse/q-agt
deleted file mode 120000
index 99fe353..0000000
--- a/files/rpms-suse/q-agt
+++ /dev/null
@@ -1 +0,0 @@
-neutron-agent
\ No newline at end of file
diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3
deleted file mode 120000
index 0a5ca2a..0000000
--- a/files/rpms-suse/q-l3
+++ /dev/null
@@ -1 +0,0 @@
-neutron-l3
\ No newline at end of file
diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift
deleted file mode 100644
index 3663b98..0000000
--- a/files/rpms-suse/swift
+++ /dev/null
@@ -1,6 +0,0 @@
-curl
-liberasurecode-devel
-memcached
-sqlite3
-xfsprogs
-xinetd
diff --git a/files/rpms/ceph b/files/rpms/ceph
index 93b5746..19f158f 100644
--- a/files/rpms/ceph
+++ b/files/rpms/ceph
@@ -1,3 +1,3 @@
 ceph    # NOPRIME
-redhat-lsb-core # not:rhel9,openEuler-20.03
+redhat-lsb-core # not:rhel9,openEuler-22.03
 xfsprogs
diff --git a/files/rpms/general b/files/rpms/general
index 163a7c8..8a5755c 100644
--- a/files/rpms/general
+++ b/files/rpms/general
@@ -6,9 +6,11 @@
 gcc-c++
 gettext  # used for compiling message catalogs
 git-core
+glibc-langpack-en # dist:rhel9
 graphviz # needed only for docs
 httpd
 httpd-devel
+iptables-nft # dist:rhel9
 iptables-services
 java-1.8.0-openjdk-headless
 libffi-devel
@@ -16,7 +18,7 @@
 libxml2-devel # lxml
 libxslt-devel # lxml
 libyaml-devel
-make # dist:openEuler-20.03
+mod_ssl # required for tls-proxy on centos 9 stream computes
 net-tools
 openssh-server
 openssl
@@ -26,10 +28,9 @@
 postgresql-devel  # psycopg2
 psmisc
 python3-devel
-python3-pip
+python3-pip # not:openEuler-22.03
 python3-systemd
-redhat-rpm-config # not:openEuler-20.03    missing dep for gcc hardening flags, see rhbz#1217376
-systemd-devel # dist:openEuler-20.03
+redhat-rpm-config # not:openEuler-22.03  missing dep for gcc hardening flags, see rhbz#1217376
 tar
 tcpdump
 unzip
diff --git a/files/rpms/nova b/files/rpms/nova
index 9e8621c..e0f13b8 100644
--- a/files/rpms/nova
+++ b/files/rpms/nova
@@ -1,12 +1,10 @@
 conntrack-tools
 curl
-dnsmasq # for q-dhcp
-dnsmasq-utils # for dhcp_release
 ebtables
 genisoimage # not:rhel9 required for config_drive
 iptables
 iputils
-kernel-modules # not:openEuler-20.03
+kernel-modules # not:openEuler-22.03
 kpartx
 parted
 polkit
diff --git a/files/rpms/swift b/files/rpms/swift
index a838d78..49a1833 100644
--- a/files/rpms/swift
+++ b/files/rpms/swift
@@ -1,7 +1,7 @@
 curl
-liberasurecode-devel # not:openEuler-20.03
+liberasurecode-devel
 memcached
 rsync-daemon
 sqlite
 xfsprogs
-xinetd # not:f35,rhel9
+xinetd # not:f36,rhel9
diff --git a/functions b/functions
index ccca5cd..42d08d7 100644
--- a/functions
+++ b/functions
@@ -118,7 +118,7 @@
         useimport="--import"
     fi
 
-    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties < "${image}"
+    openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name" --public --container-format "$container" --disk-format "$disk" $useimport $properties --file $(readlink -f "${image}")
 }
 
 # Retrieve an image from a URL and upload into Glance.
@@ -133,17 +133,28 @@
 
     local image image_fname image_name
 
+    local max_attempts=5
+
     # Create a directory for the downloaded image tarballs.
     mkdir -p $FILES/images
     image_fname=`basename "$image_url"`
     if [[ $image_url != file* ]]; then
         # Downloads the image (uec ami+akistyle), then extracts it.
         if [[ ! -f $FILES/$image_fname || "$(stat -c "%s" $FILES/$image_fname)" = "0" ]]; then
-            wget --progress=dot:giga -c $image_url -O $FILES/$image_fname
-            if [[ $? -ne 0 ]]; then
-                echo "Not found: $image_url"
-                return
-            fi
+            for attempt in `seq $max_attempts`; do
+                local rc=0
+                wget --progress=dot:giga -c $image_url -O $FILES/$image_fname || rc=$?
+                if [[ $rc -ne 0 ]]; then
+                    if [[ "$attempt" -eq "$max_attempts" ]]; then
+                        echo "Not found: $image_url"
+                        return
+                    fi
+                    echo "Download failed, retrying in $attempt seconds, attempt: $attempt"
+                    sleep $attempt
+                else
+                    break
+                fi
+            done
         fi
         image="$FILES/${image_fname}"
     else
@@ -414,10 +425,10 @@
         # kernel for use when uploading the root filesystem.
         local kernel_id="" ramdisk_id="";
         if [ -n "$kernel" ]; then
-            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2)
+            kernel_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-kernel" $(_image_properties_to_arg $img_property) --public --container-format aki --disk-format aki --file $(readlink -f "$kernel") -f value -c id)
         fi
         if [ -n "$ramdisk" ]; then
-            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2)
+            ramdisk_id=$(openstack --os-cloud=devstack-admin --os-region-name="$REGION_NAME" image create "$image_name-ramdisk" $(_image_properties_to_arg $img_property) --public --container-format ari --disk-format ari --file $(readlink -f "$ramdisk") -f value -c id)
         fi
         _upload_image "${image_name%.img}" ami ami "$image" ${kernel_id:+ kernel_id=$kernel_id} ${ramdisk_id:+ ramdisk_id=$ramdisk_id} $img_property
     fi
@@ -683,6 +694,8 @@
     iniset $conf_file DEFAULT logging_default_format_string "%(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
     iniset $conf_file DEFAULT logging_debug_format_suffix "from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d"
     iniset $conf_file DEFAULT logging_exception_prefix "%(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s"
+    # Enable or disable color for oslo.log
+    iniset $conf_file DEFAULT log_color $LOG_COLOR
 }
 
 function setup_systemd_logging {
@@ -704,6 +717,9 @@
     iniset $conf_file DEFAULT logging_context_format_string "%(color)s%(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(project_name)s %(user_name)s%(color)s] %(instance)s%(color)s%(message)s"
     iniset $conf_file DEFAULT logging_default_format_string "%(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s"
     iniset $conf_file DEFAULT logging_exception_prefix "ERROR %(name)s %(instance)s"
+
+    # Enable or disable color for oslo.log
+    iniset $conf_file DEFAULT log_color $LOG_COLOR
 }
 
 function setup_standard_logging_identity {
diff --git a/functions-common b/functions-common
index 8651604..e265256 100644
--- a/functions-common
+++ b/functions-common
@@ -49,7 +49,7 @@
 STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \
     KEYSTONE_SERVICE_URI \
     LOGFILE OS_CACERT SERVICE_HOST STACK_USER TLS_IP \
-    HOST_IPV6 SERVICE_IP_VERSION"
+    HOST_IPV6 SERVICE_IP_VERSION TUNNEL_ENDPOINT_IP TUNNEL_IP_VERSION"
 
 
 # Saves significant environment variables to .stackenv for later use
@@ -236,6 +236,27 @@
     $xtrace
 }
 
+# bool_to_int <True|False>
+#
+# Convert True|False to int 1 or 0
+# This function can be used to convert the output of trueorfalse
+# to an int following C conventions, where false is 0 and true is 1.
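+# Illustrative usage (FOO is a hypothetical setting):
+#   foo_enabled=$(bool_to_int "$(trueorfalse False FOO)")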
+function bool_to_int {
+    local xtrace
+    xtrace=$(set +o | grep xtrace)
+    set +o xtrace
+    if [ -z $1 ]; then
+        die $LINENO "Bool value required"
+    fi
+    if [[ $1 == "True" ]] ; then
+        echo '1'
+    else
+        echo '0'
+    fi
+    $xtrace
+}
+
+
 function isset {
     [[ -v "$1" ]]
 }
@@ -380,9 +401,9 @@
 # such as "install_package" further abstract things in better ways.
 #
 # ``os_VENDOR`` - vendor name: ``Ubuntu``, ``Fedora``, etc
-# ``os_RELEASE`` - major release: ``16.04`` (Ubuntu), ``23`` (Fedora)
+# ``os_RELEASE`` - major release: ``22.04`` (Ubuntu), ``23`` (Fedora)
 # ``os_PACKAGE`` - package type: ``deb`` or ``rpm``
-# ``os_CODENAME`` - vendor's codename for release: ``xenial``
+# ``os_CODENAME`` - vendor's codename for release: ``jammy``
 
 declare -g os_VENDOR os_RELEASE os_PACKAGE os_CODENAME
 
@@ -412,12 +433,15 @@
 #  - os_VENDOR
 #  - os_PACKAGE
 function GetOSVersion {
-    # CentOS Stream 9 does not provide lsb_release
+    # CentOS Stream 9 and RHEL 9 do not provide lsb_release
     source /etc/os-release
-    if [[ "${ID}${VERSION}" == "centos9" ]]; then
+    if [[ "${ID}${VERSION}" == "centos9" ]] || [[ "${ID}${VERSION}" =~ "rhel9" ]]; then
         os_RELEASE=${VERSION_ID}
         os_CODENAME="n/a"
         os_VENDOR=$(echo $NAME | tr -d '[:space:]')
+    elif [[ "${ID}${VERSION}" =~ "rocky9" ]]; then
+        os_VENDOR="Rocky"
+        os_RELEASE=${VERSION_ID}
     else
         _ensure_lsb_release
 
@@ -426,7 +450,7 @@
         os_VENDOR=$(lsb_release -i -s)
     fi
 
-    if [[ $os_VENDOR =~ (Debian|Ubuntu|LinuxMint) ]]; then
+    if [[ $os_VENDOR =~ (Debian|Ubuntu) ]]; then
         os_PACKAGE="deb"
     else
         os_PACKAGE="rpm"
@@ -444,36 +468,24 @@
 
 function GetDistro {
     GetOSVersion
-    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) || \
-            "$os_VENDOR" =~ (LinuxMint) ]]; then
-        # 'Everyone' refers to Ubuntu / Debian / Mint releases by
+    if [[ "$os_VENDOR" =~ (Ubuntu) || "$os_VENDOR" =~ (Debian) ]]; then
+        # 'Everyone' refers to Ubuntu / Debian releases by
         # the code name adjective
         DISTRO=$os_CODENAME
     elif [[ "$os_VENDOR" =~ (Fedora) ]]; then
         # For Fedora, just use 'f' and the release
         DISTRO="f$os_RELEASE"
-    elif is_opensuse; then
-        DISTRO="opensuse-$os_RELEASE"
-        # Tumbleweed uses "n/a" as a codename, and the release is a datestring
-        # like 20180218, so not very useful. Leap however uses a release
-        # with a "dot", so for example 15.0
-        [ "$os_CODENAME" = "n/a" -a "$os_RELEASE" = "${os_RELEASE/\./}" ] && \
-            DISTRO="opensuse-tumbleweed"
-    elif is_suse_linux_enterprise; then
-        # just use major release
-        DISTRO="sle${os_RELEASE%.*}"
     elif [[ "$os_VENDOR" =~ (Red.*Hat) || \
         "$os_VENDOR" =~ (CentOS) || \
         "$os_VENDOR" =~ (AlmaLinux) || \
         "$os_VENDOR" =~ (Scientific) || \
         "$os_VENDOR" =~ (OracleServer) || \
+        "$os_VENDOR" =~ (Rocky) || \
         "$os_VENDOR" =~ (Virtuozzo) ]]; then
         # Drop the . release as we assume it's compatible
         # XXX re-evaluate when we get RHEL10
         DISTRO="rhel${os_RELEASE::1}"
     elif [[ "$os_VENDOR" =~ (openEuler) ]]; then
-        # The DISTRO here is `openEuler-20.03`. While, actually only openEuler
-        # 20.03 LTS SP2 is fully tested. Other SP version maybe have bugs.
         DISTRO="openEuler-$os_RELEASE"
     else
         # We can't make a good choice here.  Setting a sensible DISTRO
@@ -518,7 +530,7 @@
 
 
 # Determine if current distribution is a Fedora-based distribution
-# (Fedora, RHEL, CentOS, etc).
+# (Fedora, RHEL, CentOS, Rocky, etc).
 # is_fedora
 function is_fedora {
     if [[ -z "$os_VENDOR" ]]; then
@@ -529,43 +541,14 @@
         [ "$os_VENDOR" = "openEuler" ] || \
         [ "$os_VENDOR" = "RedHatEnterpriseServer" ] || \
         [ "$os_VENDOR" = "RedHatEnterprise" ] || \
+        [ "$os_VENDOR" = "RedHatEnterpriseLinux" ] || \
+        [ "$os_VENDOR" = "Rocky" ] || \
         [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "CentOSStream" ] || \
         [ "$os_VENDOR" = "AlmaLinux" ] || \
         [ "$os_VENDOR" = "OracleServer" ] || [ "$os_VENDOR" = "Virtuozzo" ]
 }
 
 
-# Determine if current distribution is a SUSE-based distribution
-# (openSUSE, SLE).
-# is_suse
-function is_suse {
-    is_opensuse || is_suse_linux_enterprise
-}
-
-
-# Determine if current distribution is an openSUSE distribution
-# is_opensuse
-function is_opensuse {
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    [[ "$os_VENDOR" =~ (openSUSE) ]]
-}
-
-
-# Determine if current distribution is a SUSE Linux Enterprise (SLE)
-# distribution
-# is_suse_linux_enterprise
-function is_suse_linux_enterprise {
-    if [[ -z "$os_VENDOR" ]]; then
-        GetOSVersion
-    fi
-
-    [[ "$os_VENDOR" =~ (^SUSE) ]]
-}
-
-
 # Determine if current distribution is an Ubuntu-based distribution
 # It will also detect non-Ubuntu but Debian-based distros
 # is_ubuntu
@@ -576,6 +559,8 @@
     [ "$os_PACKAGE" = "deb" ]
 }
 
+# Determine if current distribution is an openEuler distribution
+# is_openeuler
 function is_openeuler {
     if [[ -z "$os_PACKAGE" ]]; then
         GetOSVersion
@@ -646,8 +631,10 @@
                 echo "the project to the \$PROJECTS variable in the job definition."
                 die $LINENO "ERROR_ON_CLONE is set to True so cloning not allowed in this configuration"
             fi
-            # '--branch' can also take tags
-            git_timed clone $git_clone_flags $git_remote $git_dest --branch $git_ref
+            git_timed clone --no-checkout $git_clone_flags $git_remote $git_dest
+            cd $git_dest
+            git_timed fetch $git_clone_flags origin $git_ref
+            git_timed checkout FETCH_HEAD
         elif [[ "$RECLONE" = "True" ]]; then
             # if it does exist then simulate what clone does if asked to RECLONE
             cd $git_dest
@@ -657,7 +644,7 @@
             # remove the existing ignored files (like pyc) as they cause breakage
             # (due to the py files having older timestamps than our pyc, so python
             # thinks the pyc files are correct using them)
-            find $git_dest -name '*.pyc' -delete
+            sudo find $git_dest -name '*.pyc' -delete
 
             # handle git_ref accordingly to type (tag, branch)
             if [[ -n "`git show-ref refs/tags/$git_ref`" ]]; then
@@ -784,7 +771,7 @@
     if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then
         host_ip=""
         # Find the interface used for the default route
-        host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)}
+        host_ip_iface=${host_ip_iface:-$(ip -f $af route list match default table all | grep via | awk '/default/ {print $5}' | head -1)}
         local host_ips
         host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/");  print parts[1]}')
         local ip
@@ -887,14 +874,9 @@
 # Usage: get_or_create_domain <name> <description>
 function get_or_create_domain {
     local domain_id
-    # Gets domain id
     domain_id=$(
-        # Gets domain id
-        openstack --os-cloud devstack-system-admin domain show $1 \
-            -f value -c id 2>/dev/null ||
-        # Creates new domain
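+        # --or-show makes the call idempotent: it returns the existing
+        # domain's id instead of failing when the domain already exists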
         openstack --os-cloud devstack-system-admin domain create $1 \
-            --description "$2" \
+            --description "$2" --or-show \
             -f value -c id
     )
     echo $domain_id
@@ -983,29 +965,22 @@
 # Usage: get_or_add_user_project_role <role> <user> <project> [<user_domain> <project_domain>]
 function get_or_add_user_project_role {
     local user_role_id
+    local domain_args
 
     domain_args=$(_get_domain_args $4 $5)
 
-    # Gets user role id
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack --os-cloud devstack-system-admin role add $1 \
+        --user $2 \
+        --project $3 \
+        $domain_args
     user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
         --role $1 \
         --user $2 \
         --project $3 \
         $domain_args \
-        | grep '^|\s[a-f0-9]\+' | get_field 1)
-    if [[ -z "$user_role_id" ]]; then
-        # Adds role to user and get it
-        openstack --os-cloud devstack-system-admin role add $1 \
-            --user $2 \
-            --project $3 \
-            $domain_args
-        user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
-            --role $1 \
-            --user $2 \
-            --project $3 \
-            $domain_args \
-            | grep '^|\s[a-f0-9]\+' | get_field 1)
-    fi
+        -c Role -f value)
     echo $user_role_id
 }
 
@@ -1013,23 +988,18 @@
 # Usage: get_or_add_user_domain_role <role> <user> <domain>
 function get_or_add_user_domain_role {
     local user_role_id
-    # Gets user role id
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack --os-cloud devstack-system-admin role add $1 \
+        --user $2 \
+        --domain $3
     user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
         --role $1 \
         --user $2 \
         --domain $3 \
-        | grep '^|\s[a-f0-9]\+' | get_field 1)
-    if [[ -z "$user_role_id" ]]; then
-        # Adds role to user and get it
-        openstack --os-cloud devstack-system-admin role add $1 \
-            --user $2 \
-            --domain $3
-        user_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
-            --role $1 \
-            --user $2 \
-            --domain $3 \
-            | grep '^|\s[a-f0-9]\+' | get_field 1)
-    fi
+        -c Role -f value)
+
     echo $user_role_id
 }
 
@@ -1068,23 +1038,18 @@
 # Usage: get_or_add_group_project_role <role> <group> <project>
 function get_or_add_group_project_role {
     local group_role_id
-    # Gets group role id
+
+    # Note this is idempotent so we are safe across multiple
+    # duplicate calls.
+    openstack role add $1 \
+        --group $2 \
+        --project $3
     group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
         --role $1 \
         --group $2 \
         --project $3 \
-        -f value)
-    if [[ -z "$group_role_id" ]]; then
-        # Adds role to group and get it
-        openstack --os-cloud devstack-system-admin role add $1 \
-            --group $2 \
-            --project $3
-        group_role_id=$(openstack --os-cloud devstack-system-admin role assignment list \
-            --role $1 \
-            --group $2 \
-            --project $3 \
-            -f value)
-    fi
+        -f value -c Role)
+
     echo $group_role_id
 }
 
@@ -1166,7 +1131,13 @@
 }
 
 function is_ironic_enforce_scope {
-    is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" ]] && return 0
+    is_service_enabled ironic && [[ "$IRONIC_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]] && return 0
+    return 1
+}
+
+function is_ironic_sharded {
+    # todo(JayF): Support >1 shard with multiple n-cpu instances for each
+    is_service_enabled ironic && [[ "$IRONIC_SHARDS" == "1" ]] && return 0
     return 1
 }
 
@@ -1186,8 +1157,6 @@
         pkg_dir=$base_dir/debs
     elif is_fedora; then
         pkg_dir=$base_dir/rpms
-    elif is_suse; then
-        pkg_dir=$base_dir/rpms-suse
     else
         exit_distro_not_supported "list of packages"
     fi
@@ -1462,8 +1431,6 @@
         apt_get install "$@"
     elif is_fedora; then
         yum_install "$@"
-    elif is_suse; then
-        zypper_install "$@"
     else
         exit_distro_not_supported "installing packages"
     fi
@@ -1505,8 +1472,6 @@
         apt_get purge "$@"
     elif is_fedora; then
         sudo dnf remove -y "$@" ||:
-    elif is_suse; then
-        sudo zypper remove -y "$@" ||:
     else
         exit_distro_not_supported "uninstalling packages"
     fi
@@ -1575,6 +1540,7 @@
     local command="$2"
     local group=$3
     local user=$4
+    local env_vars="$5"
     local extra=""
     if [[ -n "$group" ]]; then
         extra="Group=$group"
@@ -1583,11 +1549,15 @@
     mkdir -p $SYSTEMD_DIR
 
     iniset -sudo $unitfile "Unit" "Description" "Devstack $service"
+    iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\""
     iniset -sudo $unitfile "Service" "User" "$user"
     iniset -sudo $unitfile "Service" "ExecStart" "$command"
     iniset -sudo $unitfile "Service" "KillMode" "process"
     iniset -sudo $unitfile "Service" "TimeoutStopSec" "300"
     iniset -sudo $unitfile "Service" "ExecReload" "$KILL_PATH -HUP \$MAINPID"
+    if [[ -n "$env_vars" ]] ; then
+        iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+    fi
     if [[ -n "$group" ]]; then
         iniset -sudo $unitfile "Service" "Group" "$group"
     fi
@@ -1602,10 +1572,12 @@
     local command="$2"
     local group=$3
     local user=$4
+    local env_vars="$5"
     local unitfile="$SYSTEMD_DIR/$service"
     mkdir -p $SYSTEMD_DIR
 
     iniset -sudo $unitfile "Unit" "Description" "Devstack $service"
+    iniset -sudo $unitfile "Service" "Environment" "\"PATH=$PATH\""
     iniset -sudo $unitfile "Service" "SyslogIdentifier" "$service"
     iniset -sudo $unitfile "Service" "User" "$user"
     iniset -sudo $unitfile "Service" "ExecStart" "$command"
@@ -1616,6 +1588,9 @@
     iniset -sudo $unitfile "Service" "NotifyAccess" "all"
     iniset -sudo $unitfile "Service" "RestartForceExitStatus" "100"
 
+    if [[ -n "$env_vars" ]] ; then
+        iniset -sudo $unitfile "Service" "Environment" "$env_vars"
+    fi
     if [[ -n "$group" ]]; then
         iniset -sudo $unitfile "Service" "Group" "$group"
     fi
@@ -1663,10 +1638,17 @@
     local systemd_service="devstack@$service.service"
     local group=$3
     local user=${4:-$STACK_USER}
+    if [[ -z "$user" ]]; then
+        user=$STACK_USER
+    fi
+    local env_vars="$5"
     if [[ "$command" =~ "uwsgi" ]] ; then
-        write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user"
+        if [[ "$GLOBAL_VENV" == "True" ]] ; then
+            cmd="$cmd --venv $DEVSTACK_VENV"
+        fi
+        write_uwsgi_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
     else
-        write_user_unit_file $systemd_service "$cmd" "$group" "$user"
+        write_user_unit_file $systemd_service "$cmd" "$group" "$user" "$env_vars"
     fi
 
     $SYSTEMCTL enable $systemd_service
@@ -1687,18 +1669,20 @@
 # If the command includes shell metacharacters (;<>*) it must be run using a shell
 # If an optional group is provided sg will be used to run the
 # command as that group.
-# run_process service "command-line" [group] [user]
+# run_process service "command-line" [group] [user] [env_vars]
+# env_vars must be a space-separated list of variable assignments, e.g. "A=1 B=2"
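+# Illustrative call (the service name and variables are hypothetical):
+#   run_process my-svc "$MY_SVC_BIN --config-file $MY_SVC_CONF" "" "" "FOO=1 BAR=2"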
 function run_process {
     local service=$1
     local command="$2"
     local group=$3
     local user=$4
+    local env_vars="$5"
 
     local name=$service
 
     time_start "run_process"
     if is_service_enabled $service; then
-        _run_under_systemd "$name" "$command" "$group" "$user"
+        _run_under_systemd "$name" "$command" "$group" "$user" "$env_vars"
     fi
     time_stop "run_process"
 }
@@ -2454,6 +2438,11 @@
     _TIME_TOTAL[$name]=$(($total + $elapsed_time))
 }
 
+function install_openstack_cli_server {
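+    # Prepending the wrapper directory lets the thin "openstack" client script
+    # shadow python-openstackclient and forward commands to this server process.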
+    export PATH=$TOP_DIR/files/openstack-cli-server:$PATH
+    run_process openstack-cli-server "$PYTHON $TOP_DIR/files/openstack-cli-server/openstack-cli-server"
+}
+
 function oscwrap {
     local xtrace
     xtrace=$(set +o | grep xtrace)
@@ -2549,6 +2538,11 @@
     fi
 }
 
+function is_fips_enabled {
+    fips=`cat /proc/sys/crypto/fips_enabled`
+    [ "$fips" == "1" ]
+}
+
 # Restore xtrace
 $_XTRACE_FUNCTIONS_COMMON
 
diff --git a/inc/ini-config b/inc/ini-config
index 7993682..f65e42d 100644
--- a/inc/ini-config
+++ b/inc/ini-config
@@ -189,6 +189,9 @@
     local option=$3
     local value=$4
 
+    # Escape the ampersand character (&)
+    value=$(echo $value | sed -e 's/&/\\&/g')
+
     if [[ -z $section || -z $option ]]; then
         $xtrace
         return
diff --git a/inc/python b/inc/python
index 9382d35..1fd4147 100644
--- a/inc/python
+++ b/inc/python
@@ -7,7 +7,6 @@
 # External functions used:
 # - GetOSVersion
 # - is_fedora
-# - is_suse
 # - safe_chown
 
 # Save trace setting
@@ -33,6 +32,26 @@
 # Python Functions
 # ================
 
+# Setup the global devstack virtualenvs and the associated environment
+# updates.
+function setup_devstack_virtualenv {
+    # We run devstack out of a global virtualenv.
+    if [[ ! -d $DEVSTACK_VENV ]] ; then
+        # Using system site packages to enable nova to use libguestfs.
+        # This package is currently installed via the distro and not
+        # available on pypi.
+        python$PYTHON3_VERSION -m venv --system-site-packages $DEVSTACK_VENV
+        pip_install -U pip setuptools
+        #NOTE(rpittau): workaround for simplejson removal in osc
+        #  https://review.opendev.org/c/openstack/python-openstackclient/+/920001
+        pip_install -U simplejson
+    fi
+    if [[ ":$PATH:" != *":$DEVSTACK_VENV/bin:"* ]] ; then
+        export PATH="$DEVSTACK_VENV/bin:$PATH"
+        export PYTHON="$DEVSTACK_VENV/bin/python3"
+    fi
+}
+
 # Get the path to the pip command.
 # get_pip_command
 function get_pip_command {
@@ -61,9 +80,11 @@
     fi
     $xtrace
 
-    local PYTHON_PATH=/usr/local/bin
-    is_suse && PYTHON_PATH=/usr/bin
-    echo $PYTHON_PATH
+    if [[ "$GLOBAL_VENV" == "True" ]] ; then
+        echo "$DEVSTACK_VENV/bin"
+    else
+        echo "/usr/local/bin"
+    fi
 }
 
 # Wrapper for ``pip install`` that only installs versions of libraries
@@ -168,6 +189,14 @@
     if [[ -n ${PIP_VIRTUAL_ENV:=} && -d ${PIP_VIRTUAL_ENV} ]]; then
         local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip
         local sudo_pip="env"
+    elif [[ "${GLOBAL_VENV}" == "True" && -d ${DEVSTACK_VENV} ]] ; then
+        # We have to check that the DEVSTACK_VENV exists because early
+        # devstack bootstrapping needs to operate in a system context
+        # to bootstrap pip. Once pip is bootstrapped we create the
+        # global venv and can start to use it.
+        local cmd_pip=$DEVSTACK_VENV/bin/pip
+        local sudo_pip="env"
+        echo "Using python $PYTHON3_VERSION to install $package_dir"
     else
         local cmd_pip="python$PYTHON3_VERSION -m pip"
         # See
@@ -186,15 +215,11 @@
 
     $xtrace
 
-    # adding SETUPTOOLS_SYS_PATH_TECHNIQUE is a workaround to keep
-    # the same behaviour of setuptools before version 25.0.0.
-    # related issue: https://github.com/pypa/pip/issues/3874
     $sudo_pip \
         http_proxy="${http_proxy:-}" \
         https_proxy="${https_proxy:-}" \
         no_proxy="${no_proxy:-}" \
         PIP_FIND_LINKS=$PIP_FIND_LINKS \
-        SETUPTOOLS_SYS_PATH_TECHNIQUE=rewrite \
         $cmd_pip $upgrade \
         $@
     result=$?
@@ -383,6 +408,9 @@
         # source we are about to do.
         local name
         name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg)
+        if [ -z "$name" ]; then
+            name=$(awk '/^name =/ {gsub(/"/, "", $3); print $3}' $project_dir/pyproject.toml)
+        fi
         $REQUIREMENTS_DIR/.venv/bin/edit-constraints \
             $REQUIREMENTS_DIR/upper-constraints.txt -- $name
     fi
@@ -445,7 +473,7 @@
 
     pip_install $flags "$project_dir$extras"
     # ensure that further actions can do things like setup.py sdist
-    if [[ "$flags" == "-e" ]]; then
+    if [[ "$flags" == "-e" && "$GLOBAL_VENV" == "False" ]]; then
         safe_chown -R $STACK_USER $1/*.egg-info
     fi
 }
@@ -466,8 +494,6 @@
 function install_python3 {
     if is_ubuntu; then
         apt_get install python${PYTHON3_VERSION} python${PYTHON3_VERSION}-dev
-    elif is_suse; then
-        install_package python3-devel python3-dbm
     elif is_fedora; then
         if [ "$os_VENDOR" = "Fedora" ]; then
             install_package python${PYTHON3_VERSION//.}
diff --git a/inc/rootwrap b/inc/rootwrap
index 2a6e4b6..4c65440 100644
--- a/inc/rootwrap
+++ b/inc/rootwrap
@@ -60,6 +60,11 @@
     sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf
     sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf
 
+    # Rely on $PATH set by devstack to determine what is safe to execute
+    # by rootwrap rather than use explicit whitelist of paths in
+    # rootwrap.conf
+    sudo sed -e 's/^exec_dirs=.*/#&/' -i /etc/${project}/rootwrap.conf
+
     # Set up the rootwrap sudoers
     local tempfile
     tempfile=$(mktemp)
diff --git a/lib/apache b/lib/apache
index 02827d1..1420f76 100644
--- a/lib/apache
+++ b/lib/apache
@@ -44,10 +44,6 @@
     APACHE_NAME=httpd
     APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/conf.d}
     APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d}
-elif is_suse; then
-    APACHE_NAME=apache2
-    APACHE_CONF_DIR=${APACHE_CONF_DIR:-/etc/$APACHE_NAME/vhosts.d}
-    APACHE_SETTINGS_DIR=${APACHE_SETTINGS_DIR:-/etc/$APACHE_NAME/conf.d}
 fi
 APACHE_LOG_DIR="/var/log/${APACHE_NAME}"
 
@@ -65,11 +61,6 @@
             sudo a2enmod $mod
             restart_apache_server
         fi
-    elif is_suse; then
-        if ! a2enmod -q $mod ; then
-            sudo a2enmod $mod
-            restart_apache_server
-        fi
     elif is_fedora; then
         # pass
         true
@@ -95,7 +86,7 @@
     # didn't fix Python 3.10 compatibility before release.  Should be
     # fixed in uwsgi 4.9.0; can remove this when packages available
     # or we drop this release
-    elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f35 ]]; then
+    elif is_fedora && ! is_openeuler && ! [[ $DISTRO =~ f36 ]]; then
         # Note httpd comes with mod_proxy_uwsgi and it is loaded by
         # default; the mod_proxy_uwsgi package actually conflicts now.
         # See:
@@ -104,10 +95,6 @@
         # Thus there is nothing else to do after this install
         install_package uwsgi \
                         uwsgi-plugin-python3
-    elif [[ $os_VENDOR =~ openSUSE ]]; then
-        install_package uwsgi \
-                        uwsgi-python3 \
-                        apache2-mod_uwsgi
     else
         # Compile uwsgi from source.
         local dir
@@ -125,7 +112,7 @@
         sudo rm -rf $dir
     fi
 
-    if is_ubuntu || is_suse ; then
+    if is_ubuntu; then
         # we've got to enable proxy and proxy_uwsgi for this to work
         sudo a2enmod proxy
         sudo a2enmod proxy_uwsgi
@@ -150,13 +137,13 @@
     elif is_fedora; then
         sudo rm -f /etc/httpd/conf.d/000-*
         install_package httpd python3-mod_wsgi
+        # rpm distros don't enable httpd by default so enable it to support reboots.
+        sudo systemctl enable httpd
         # For consistency with Ubuntu, switch to the worker mpm, as
         # the default is event
         sudo sed -i '/mod_mpm_prefork.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
         sudo sed -i '/mod_mpm_event.so/s/^/#/g' /etc/httpd/conf.modules.d/00-mpm.conf
         sudo sed -i '/mod_mpm_worker.so/s/^#//g' /etc/httpd/conf.modules.d/00-mpm.conf
-    elif is_suse; then
-        install_package apache2 apache2-mod_wsgi
     else
         exit_distro_not_supported "apache wsgi installation"
     fi
@@ -171,7 +158,7 @@
 # recognise it. a2ensite and a2dissite ignore the .conf suffix used as parameter. The default sites'
 # files are 000-default.conf and default-ssl.conf.
 #
-# On Fedora and openSUSE, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled.
+# On Fedora, any file in /etc/httpd/conf.d/ whose name ends with .conf is enabled.
 #
 # On RHEL and CentOS, things should hopefully work as in Fedora.
 #
@@ -187,7 +174,7 @@
     if is_ubuntu; then
         # Ubuntu 14.04 - Apache 2.4
         echo $APACHE_CONF_DIR/${site}.conf
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
         # fedora conf.d is only imported if it ends with .conf so this is approx the same
         local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
         if [ -f $enabled_site_file ]; then
@@ -205,7 +192,7 @@
     enable_apache_mod version
     if is_ubuntu; then
         sudo a2ensite ${site}
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
         local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
         # Do nothing if site already enabled or no site config exists
         if [[ -f ${enabled_site_file}.disabled ]] && [[ ! -f ${enabled_site_file} ]]; then
@@ -219,7 +206,7 @@
     local site=$@
     if is_ubuntu; then
         sudo a2dissite ${site} || true
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
         local enabled_site_file="$APACHE_CONF_DIR/${site}.conf"
         # Do nothing if no site config exists
         if [[ -f ${enabled_site_file} ]]; then
@@ -250,13 +237,17 @@
     restart_service $APACHE_NAME
 }
 
+# write_uwsgi_config() - Create a new uWSGI config file
 function write_uwsgi_config {
-    local file=$1
+    local conf=$1
     local wsgi=$2
     local url=$3
     local http=$4
-    local name=""
-    name=$(basename $wsgi)
+    local name=$5
+
+    if [ -z "$name" ]; then
+        name=$(basename $wsgi)
+    fi
 
     # create a home for the sockets; note don't use /tmp -- apache has
     # a private view of it on some platforms.
@@ -271,39 +262,47 @@
     local socket="$socket_dir/${name}.socket"
 
     # always cleanup given that we are using iniset here
-    rm -rf $file
-    iniset "$file" uwsgi wsgi-file "$wsgi"
-    iniset "$file" uwsgi processes $API_WORKERS
+    rm -rf $conf
+    # Set either the module path or wsgi script path depending on what we've
+    # been given. Note that the regex isn't exhaustive - neither Python modules
+    # nor Python variables can start with a number - but it's "good enough"
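+    # e.g. a (hypothetical) "foo.wsgi.api:application" takes the module branch,
+    # while a path like "/opt/stack/foo/foo-api.wsgi" falls through to wsgi-file.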
+    if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+        iniset "$conf" uwsgi module "$wsgi"
+    else
+        deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+        iniset "$conf" uwsgi wsgi-file "$wsgi"
+    fi
+    iniset "$conf" uwsgi processes $API_WORKERS
     # This is running standalone
-    iniset "$file" uwsgi master true
+    iniset "$conf" uwsgi master true
     # Set die-on-term & exit-on-reload so that uwsgi shuts down
-    iniset "$file" uwsgi die-on-term true
-    iniset "$file" uwsgi exit-on-reload false
+    iniset "$conf" uwsgi die-on-term true
+    iniset "$conf" uwsgi exit-on-reload false
     # Set worker-reload-mercy so that worker will not exit till the time
     # configured after graceful shutdown
-    iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
-    iniset "$file" uwsgi enable-threads true
-    iniset "$file" uwsgi plugins http,python3
+    iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi enable-threads true
+    iniset "$conf" uwsgi plugins http,python3
     # uwsgi recommends this to prevent thundering herd on accept.
-    iniset "$file" uwsgi thunder-lock true
+    iniset "$conf" uwsgi thunder-lock true
     # Set hook to trigger graceful shutdown on SIGTERM
-    iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+    iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
     # Override the default size for headers from the 4k default.
-    iniset "$file" uwsgi buffer-size 65535
+    iniset "$conf" uwsgi buffer-size 65535
     # Make sure the client doesn't try to re-use the connection.
-    iniset "$file" uwsgi add-header "Connection: close"
+    iniset "$conf" uwsgi add-header "Connection: close"
     # This ensures that file descriptors aren't shared between processes.
-    iniset "$file" uwsgi lazy-apps true
+    iniset "$conf" uwsgi lazy-apps true
 
     # If we said bind directly to http, then do that and don't start the apache proxy
     if [[ -n "$http" ]]; then
-        iniset "$file" uwsgi http $http
+        iniset "$conf" uwsgi http $http
     else
         local apache_conf=""
         apache_conf=$(apache_site_config_for $name)
-        iniset "$file" uwsgi socket "$socket"
-        iniset "$file" uwsgi chmod-socket 666
-        echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 " | sudo tee -a $apache_conf
+        iniset "$conf" uwsgi socket "$socket"
+        iniset "$conf" uwsgi chmod-socket 666
+        echo "ProxyPass \"${url}\" \"unix:${socket}|uwsgi://uwsgi-uds-${name}\" retry=0 acquire=1 " | sudo tee -a $apache_conf
         enable_apache_site $name
         restart_apache_server
     fi
@@ -316,47 +315,58 @@
 # but that involves having apache buffer the request before sending it to
 # uwsgi.
 function write_local_uwsgi_http_config {
-    local file=$1
+    local conf=$1
     local wsgi=$2
     local url=$3
-    name=$(basename $wsgi)
+    local name=$4
+
+    if [ -z "$name" ]; then
+        name=$(basename $wsgi)
+    fi
 
     # create a home for the sockets; note don't use /tmp -- apache has
     # a private view of it on some platforms.
 
     # always cleanup given that we are using iniset here
-    rm -rf $file
-    iniset "$file" uwsgi wsgi-file "$wsgi"
+    rm -rf $conf
+    # Set either the module path or wsgi script path depending on what we've
+    # been given
+    if [[ "$wsgi" =~ ^[a-zA-Z0-9_.]+:[a-zA-Z0-9_]+$ ]]; then
+        iniset "$conf" uwsgi module "$wsgi"
+    else
+        deprecated 'Configuring uWSGI with a WSGI file is deprecated, use module paths instead'
+        iniset "$conf" uwsgi wsgi-file "$wsgi"
+    fi
     port=$(get_random_port)
-    iniset "$file" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
-    iniset "$file" uwsgi processes $API_WORKERS
+    iniset "$conf" uwsgi http-socket "$APACHE_LOCAL_HOST:$port"
+    iniset "$conf" uwsgi processes $API_WORKERS
     # This is running standalone
-    iniset "$file" uwsgi master true
+    iniset "$conf" uwsgi master true
     # Set die-on-term & exit-on-reload so that uwsgi shuts down
-    iniset "$file" uwsgi die-on-term true
-    iniset "$file" uwsgi exit-on-reload false
-    iniset "$file" uwsgi enable-threads true
-    iniset "$file" uwsgi plugins http,python3
-    # uwsgi recommends this to prevent thundering herd on accept.
-    iniset "$file" uwsgi thunder-lock true
-    # Set hook to trigger graceful shutdown on SIGTERM
-    iniset "$file" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
+    iniset "$conf" uwsgi die-on-term true
+    iniset "$conf" uwsgi exit-on-reload false
     # Set worker-reload-mercy so that worker will not exit till the time
     # configured after graceful shutdown
-    iniset "$file" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi worker-reload-mercy $WORKER_TIMEOUT
+    iniset "$conf" uwsgi enable-threads true
+    iniset "$conf" uwsgi plugins http,python3
+    # uwsgi recommends this to prevent thundering herd on accept.
+    iniset "$conf" uwsgi thunder-lock true
+    # Set hook to trigger graceful shutdown on SIGTERM
+    iniset "$conf" uwsgi hook-master-start "unix_signal:15 gracefully_kill_them_all"
     # Override the default size for headers from the 4k default.
-    iniset "$file" uwsgi buffer-size 65535
+    iniset "$conf" uwsgi buffer-size 65535
     # Make sure the client doesn't try to re-use the connection.
-    iniset "$file" uwsgi add-header "Connection: close"
+    iniset "$conf" uwsgi add-header "Connection: close"
     # This ensures that file descriptors aren't shared between processes.
-    iniset "$file" uwsgi lazy-apps true
-    iniset "$file" uwsgi chmod-socket 666
-    iniset "$file" uwsgi http-raw-body true
-    iniset "$file" uwsgi http-chunked-input true
-    iniset "$file" uwsgi http-auto-chunked true
-    iniset "$file" uwsgi http-keepalive false
+    iniset "$conf" uwsgi lazy-apps true
+    iniset "$conf" uwsgi chmod-socket 666
+    iniset "$conf" uwsgi http-raw-body true
+    iniset "$conf" uwsgi http-chunked-input true
+    iniset "$conf" uwsgi http-auto-chunked true
+    iniset "$conf" uwsgi http-keepalive false
     # Increase socket timeout for slow chunked uploads
-    iniset "$file" uwsgi socket-timeout 30
+    iniset "$conf" uwsgi socket-timeout 30
 
     enable_apache_mod proxy
     enable_apache_mod proxy_http
@@ -364,7 +374,7 @@
     apache_conf=$(apache_site_config_for $name)
     echo "KeepAlive Off" | sudo tee $apache_conf
     echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
-    echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 " | sudo tee -a $apache_conf
+    echo "ProxyPass \"${url}\" \"http://$APACHE_LOCAL_HOST:$port\" retry=0 acquire=1 " | sudo tee -a $apache_conf
     enable_apache_site $name
     restart_apache_server
 }
@@ -383,18 +393,24 @@
 
     echo "KeepAlive Off" | sudo tee $apache_conf
     echo "SetEnv proxy-sendchunked 1" | sudo tee -a $apache_conf
-    echo "ProxyPass \"${loc}\" \"$url\" retry=0 " | sudo tee -a $apache_conf
+    echo "ProxyPass \"${loc}\" \"$url\" retry=0 acquire=1 " | sudo tee -a $apache_conf
     enable_apache_site $name
     restart_apache_server
 }
 
 function remove_uwsgi_config {
-    local file=$1
+    local conf=$1
     local wsgi=$2
     local name=""
+    # TODO(stephenfin): Remove this call when everyone is using module path
+    # configuration instead of file path configuration
     name=$(basename $wsgi)
 
-    rm -rf $file
+    if [[ "$wsgi" = /* ]]; then
+        deprecated "Passing a wsgi script to remove_uwsgi_config is deprecated, pass an application name instead"
+    fi
+
+    rm -rf $conf
     disable_apache_site $name
 }
 
diff --git a/lib/cinder b/lib/cinder
index b029fa0..0adca4f 100644
--- a/lib/cinder
+++ b/lib/cinder
@@ -43,6 +43,13 @@
 GITDIR["python-brick-cinderclient-ext"]=$DEST/python-brick-cinderclient-ext
 CINDER_DIR=$DEST/cinder
 
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+    CINDER_MY_IP="$HOST_IPV6"
+else
+    CINDER_MY_IP="$HOST_IP"
+fi
+
+
 # Cinder virtual environment
 if [[ ${USE_VENV} = True ]]; then
     PROJECT_VENV["cinder"]=${CINDER_DIR}.venv
@@ -69,6 +76,11 @@
 CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
 
+# We do not need to report service status every 10s for devstack-like
+# deployments. In the gate this generates extra work for the services and the
+# database which are already taxed.
+CINDER_SERVICE_REPORT_INTERVAL=${CINDER_SERVICE_REPORT_INTERVAL:-120}
+
 # What type of LVM device should Cinder use for LVM backend
 # Defaults to auto, which will do thin provisioning if it's a fresh
 # volume group, otherwise it will do thick. The other valid choices are
@@ -76,6 +88,10 @@
 # thin provisioning.
 CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-auto}
 
+# ``CINDER_USE_SERVICE_TOKEN`` is a mode where a service token is passed along
+# with the user token when communicating with external REST APIs such as Glance.
+CINDER_USE_SERVICE_TOKEN=$(trueorfalse True CINDER_USE_SERVICE_TOKEN)
+
 # Default backends
 # The backend format is type:name where type is one of the supported backend
 # types (lvm, nfs, etc) and name is the identifier used in the Cinder
@@ -88,13 +104,33 @@
 CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}}
 CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]')
 
-# Default to lioadm
-CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-lioadm}
+VOLUME_TYPE_MULTIATTACH=${VOLUME_TYPE_MULTIATTACH:-multiattach}
 
-# EL and SUSE should only use lioadm
-if is_fedora || is_suse; then
-    if [[ ${CINDER_ISCSI_HELPER} != "lioadm" ]]; then
-        die "lioadm is the only valid Cinder target_helper config on this platform"
+if [[ -n "$CINDER_ISCSI_HELPER" ]]; then
+    if [[ -z "$CINDER_TARGET_HELPER" ]]; then
+        deprecated 'Using CINDER_ISCSI_HELPER is deprecated, use CINDER_TARGET_HELPER instead'
+        CINDER_TARGET_HELPER="$CINDER_ISCSI_HELPER"
+    else
+        deprecated 'Deprecated CINDER_ISCSI_HELPER is set, but is being overwritten by CINDER_TARGET_HELPER'
+    fi
+fi
+CINDER_TARGET_HELPER=${CINDER_TARGET_HELPER:-lioadm}
+
+if [[ $CINDER_TARGET_HELPER == 'nvmet' ]]; then
+    CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'nvmet_rdma'}
+    CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'nvme-subsystem-1'}
+    CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-4420}
+else
+    CINDER_TARGET_PROTOCOL=${CINDER_TARGET_PROTOCOL:-'iscsi'}
+    CINDER_TARGET_PREFIX=${CINDER_TARGET_PREFIX:-'iqn.2010-10.org.openstack:'}
+    CINDER_TARGET_PORT=${CINDER_TARGET_PORT:-3260}
+fi
+
+
+# EL should only use lioadm or nvmet
+if is_fedora; then
+    if [[ ${CINDER_TARGET_HELPER} != "lioadm" && ${CINDER_TARGET_HELPER} != 'nvmet' ]]; then
+        die "lioadm and nvmet are the only valid Cinder target_helper config on this platform"
     fi
 fi
 
@@ -151,6 +187,12 @@
 # Environment variables to configure the image-volume cache
 CINDER_IMG_CACHE_ENABLED=${CINDER_IMG_CACHE_ENABLED:-True}
 
+# Environment variables to configure the optimized volume upload
+CINDER_UPLOAD_OPTIMIZED=${CINDER_UPLOAD_OPTIMIZED:-False}
+
+# Environment variables to configure the internal tenant during optimized volume upload
+CINDER_UPLOAD_INTERNAL_TENANT=${CINDER_UPLOAD_INTERNAL_TENANT:-False}
+
 # For limits, if left unset, it will use cinder defaults of 0 for unlimited
 CINDER_IMG_CACHE_SIZE_GB=${CINDER_IMG_CACHE_SIZE_GB:-}
 CINDER_IMG_CACHE_SIZE_COUNT=${CINDER_IMG_CACHE_SIZE_COUNT:-}
@@ -160,6 +202,11 @@
 # enable the cache for all cinder backends.
 CINDER_CACHE_ENABLED_FOR_BACKENDS=${CINDER_CACHE_ENABLED_FOR_BACKENDS:-$CINDER_ENABLED_BACKENDS}
 
+# Configure which cinder backends will have optimized volume upload. This takes
+# the same form as the CINDER_ENABLED_BACKENDS config option. By default,
+# optimized upload is enabled for all cinder backends.
+CINDER_UPLOAD_OPTIMIZED_BACKENDS=${CINDER_UPLOAD_OPTIMIZED_BACKENDS:-$CINDER_ENABLED_BACKENDS}
+
 # Flag to set the oslo_policy.enforce_scope. This is used to switch
 # the  Volume API policies to start checking the scope of token. by default,
 # this flag is False.
@@ -187,7 +234,7 @@
 function cleanup_cinder {
     # ensure the volume group is cleared up because fails might
     # leave dead volumes in the group
-    if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
+    if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
         local targets
         targets=$(sudo tgtadm --op show --mode target)
         if [ $? -ne 0 ]; then
@@ -215,8 +262,14 @@
         else
             stop_service tgtd
         fi
-    else
+    elif [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
         sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
+    elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+        # If we don't disconnect everything vgremove will block
+        sudo nvme disconnect-all
+        sudo nvmetcli clear
+    else
+        die $LINENO "Unknown value \"$CINDER_TARGET_HELPER\" for CINDER_TARGET_HELPER"
     fi
 
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
@@ -237,7 +290,7 @@
     fi
 
     stop_process "c-api"
-    remove_uwsgi_config "$CINDER_UWSGI_CONF" "$CINDER_UWSGI"
+    remove_uwsgi_config "$CINDER_UWSGI_CONF" "cinder-wsgi"
 }
 
 # configure_cinder() - Set config files, create data dirs, etc
@@ -267,7 +320,7 @@
 
     iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
 
-    iniset $CINDER_CONF DEFAULT target_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF DEFAULT target_helper "$CINDER_TARGET_HELPER"
     iniset $CINDER_CONF database connection `database_connection_url cinder`
     iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI
     iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf"
@@ -275,11 +328,7 @@
     iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS
     iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH
     iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH
-    if [[ $SERVICE_IP_VERSION == 6 ]]; then
-        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IPV6"
-    else
-        iniset $CINDER_CONF DEFAULT my_ip "$HOST_IP"
-    fi
+    iniset $CINDER_CONF DEFAULT my_ip "$CINDER_MY_IP"
     iniset $CINDER_CONF key_manager backend cinder.keymgr.conf_key_mgr.ConfKeyManager
     iniset $CINDER_CONF key_manager fixed_key $(openssl rand -hex 16)
     if [[ -n "$CINDER_ALLOWED_DIRECT_URL_SCHEMES" ]]; then
@@ -296,6 +345,9 @@
     # details and example failures.
     iniset $CINDER_CONF DEFAULT rpc_response_timeout 120
 
+    iniset $CINDER_CONF DEFAULT report_interval $CINDER_SERVICE_REPORT_INTERVAL
+    iniset $CINDER_CONF DEFAULT service_down_time $(($CINDER_SERVICE_REPORT_INTERVAL * 6))
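+    # e.g. with the default 120s interval a service is only considered down
+    # after 6 missed reports (720s).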
+
     if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then
         local enabled_backends=""
         local default_name=""
@@ -316,6 +368,14 @@
             iniset $CINDER_CONF DEFAULT default_volume_type ${default_name}
         fi
         configure_cinder_image_volume_cache
+
+        # The upload optimization uses Cinder's clone volume functionality to
+        # clone the Image-Volume from the source volume, hence it can only be
+        # performed when glance is using cinder as its backend.
+        if [[ "$USE_CINDER_FOR_GLANCE" == "True" ]]; then
+            # Configure optimized volume upload
+            configure_cinder_volume_upload
+        fi
     fi
 
     if is_service_enabled c-bak && [[ -n "$CINDER_BACKUP_DRIVER" ]]; then
@@ -377,27 +437,44 @@
     if [[ ! -z "$CINDER_COORDINATION_URL" ]]; then
         iniset $CINDER_CONF coordination backend_url "$CINDER_COORDINATION_URL"
     elif is_service_enabled etcd3; then
-        iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT"
+        # NOTE(jan.gutter): api_version can revert to default once tooz is
+        # updated with the etcd v3.4 defaults
+        iniset $CINDER_CONF coordination backend_url "etcd3+http://${SERVICE_HOST}:$ETCD_PORT?api_version=v3"
     fi
 
-    if [[ "$CINDER_ENFORCE_SCOPE" == True ]] ; then
+    if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $CINDER_CONF oslo_policy enforce_scope true
         iniset $CINDER_CONF oslo_policy enforce_new_defaults true
+    else
+        iniset $CINDER_CONF oslo_policy enforce_scope false
+        iniset $CINDER_CONF oslo_policy enforce_new_defaults false
+    fi
+
+    if [ "$CINDER_USE_SERVICE_TOKEN" == "True" ]; then
+        init_cinder_service_user_conf
     fi
 }
 
 # create_cinder_accounts() - Set up common required cinder accounts
 
-# Tenant               User       Roles
+# Project              User       Roles
 # ------------------------------------------------------------------
-# service              cinder     admin        # if enabled
+# SERVICE_PROJECT_NAME cinder     service
+# SERVICE_PROJECT_NAME cinder     creator (if Barbican is enabled)
 
 # Migrated from keystone_data.sh
 function create_cinder_accounts {
     # Cinder
     if [[ "$ENABLED_SERVICES" =~ "c-api" ]]; then
 
-        create_service_user "cinder"
+        local extra_role=""
+
+        # cinder needs the "creator" role in order to interact with barbican
+        if is_service_enabled barbican; then
+            extra_role=$(get_or_create_role "creator")
+        fi
+
+        create_service_user "cinder" $extra_role
 
         # block-storage is the official service type
         get_or_create_service "cinder" "block-storage" "Cinder Volume Service"
@@ -465,9 +542,9 @@
 function install_cinder {
     git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH
     setup_develop $CINDER_DIR
-    if [[ "$CINDER_ISCSI_HELPER" == "tgtadm" ]]; then
+    if [[ "$CINDER_TARGET_HELPER" == "tgtadm" ]]; then
         install_package tgt
-    elif [[ "$CINDER_ISCSI_HELPER" == "lioadm" ]]; then
+    elif [[ "$CINDER_TARGET_HELPER" == "lioadm" ]]; then
         if is_ubuntu; then
             # TODO(frickler): Workaround for https://launchpad.net/bugs/1819819
             sudo mkdir -p /etc/target
@@ -476,6 +553,43 @@
         else
             install_package targetcli
         fi
+    elif [[ "$CINDER_TARGET_HELPER" == "nvmet" ]]; then
+        install_package nvme-cli
+
+        # TODO: Remove manual installation of the dependency when the
+        # requirement is added to nvmetcli:
+        # http://lists.infradead.org/pipermail/linux-nvme/2022-July/033576.html
+        if is_ubuntu; then
+            install_package python3-configshell-fb
+        else
+            install_package python3-configshell
+        fi
+        # Install from source because Ubuntu doesn't have the package and some packaged versions didn't work on Python 3
+        pip_install git+git://git.infradead.org/users/hch/nvmetcli.git
+
+        sudo modprobe nvmet
+        sudo modprobe nvme-fabrics
+
+        if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
+            install_package rdma-core
+            sudo modprobe nvme-rdma
+
+            # Create the Soft-RoCE device over the networking interface
+            local iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $CINDER_MY_IP | awk '{print $1}'`}
+            if [[ -z "$iface" ]]; then
+                die $LINENO "Cannot find interface to bind Soft-RoCE"
+            fi
+
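+            # rxe provides RoCE in software on top of a plain Ethernet netdev,
+            # so the nvmet_rdma path can be exercised without RDMA-capable NICs.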
+            if ! sudo rdma link | grep $iface ; then
+                sudo rdma link add rxe_$iface type rxe netdev $iface
+            fi
+
+        elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
+            sudo modprobe nvme-tcp
+
+        else  # 'nvmet_fc'
+            sudo modprobe nvme-fc
+        fi
     fi
 }
 
@@ -512,18 +626,13 @@
         service_port=$CINDER_SERVICE_PORT_INT
         service_protocol="http"
     fi
-    if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then
+    if [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
         if is_service_enabled c-vol; then
             # Delete any old stack.conf
             sudo rm -f /etc/tgt/conf.d/stack.conf
             _configure_tgt_for_config_d
             if is_ubuntu; then
                 sudo service tgt restart
-            elif is_suse; then
-                # NOTE(dmllr): workaround restart bug
-                # https://bugzilla.suse.com/show_bug.cgi?id=934642
-                stop_service tgtd
-                start_service tgtd
             else
                 restart_service tgtd
             fi
@@ -552,8 +661,13 @@
     fi
 
     run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF"
-    run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF"
-    run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF"
+    # Tune glibc for Python services: use a single malloc arena for all threads
+    # and disable dynamic thresholds to reduce memory usage when using native
+    # threads directly or via eventlet.tpool.
+    # https://www.gnu.org/software/libc/manual/html_node/Memory-Allocation-Tunables.html
+    malloc_tuning="MALLOC_ARENA_MAX=1 MALLOC_MMAP_THRESHOLD_=131072 MALLOC_TRIM_THRESHOLD_=262144"
+    run_process c-bak "$CINDER_BIN_DIR/cinder-backup --config-file $CINDER_CONF" "" "" "$malloc_tuning"
+    run_process c-vol "$CINDER_BIN_DIR/cinder-volume --config-file $CINDER_CONF" "" "" "$malloc_tuning"
 
     # NOTE(jdg): For cinder, startup order matters.  To ensure that report_capabilities is received
     # by the scheduler start the cinder-volume service last (or restart it) after the scheduler
@@ -568,6 +682,23 @@
     stop_process c-vol
 }
 
+function create_one_type {
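+    # Usage: create_one_type <type-name> <property-key> <property-value>
+    # e.g.:  create_one_type multiattach multiattach "<is> True"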
+    type_name=$1
+    property_key=$2
+    property_value=$3
+    # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode
+    if is_service_enabled keystone; then
+        openstack --os-region-name="$REGION_NAME" volume type create --property $property_key="$property_value" $type_name
+    else
+        # TODO (e0ne): use openstack client once it supports cinder in noauth mode:
+        # https://bugs.launchpad.net/python-cinderclient/+bug/1755279
+        local cinder_url
+        cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3
+        OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create $type_name
+        OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key $type_name set $property_key="$property_value"
+    fi
+}
+
 # create_volume_types() - Create Cinder's configured volume types
 function create_volume_types {
     # Create volume types
@@ -575,19 +706,13 @@
         local be be_name
         for be in ${CINDER_ENABLED_BACKENDS//,/ }; do
             be_name=${be##*:}
-            # NOTE (e0ne): openstack client doesn't work with cinder in noauth mode
-            if is_service_enabled keystone; then
-                openstack --os-region-name="$REGION_NAME" volume type create --property volume_backend_name="${be_name}" ${be_name}
-            else
-                # TODO (e0ne): use openstack client once it will support cinder in noauth mode:
-                # https://bugs.launchpad.net/python-cinderclient/+bug/1755279
-                local cinder_url
-                cinder_url=$CINDER_SERVICE_PROTOCOL://$SERVICE_HOST:$CINDER_SERVICE_PORT/v3
-                OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-create ${be_name}
-                OS_USER_ID=$OS_USERNAME OS_PROJECT_ID=$OS_PROJECT_NAME cinder --os-auth-type noauth --os-endpoint=$cinder_url type-key ${be_name} set volume_backend_name=${be_name}
-            fi
+            create_one_type $be_name "volume_backend_name" $be_name
         done
 
+        if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then
+            create_one_type $VOLUME_TYPE_MULTIATTACH $VOLUME_TYPE_MULTIATTACH "<is> True"
+        fi
+
         # Increase quota for the service project if glance is using cinder,
         # since it's likely to occasionally go above the default 10 in parallel
         # test execution.
@@ -631,6 +756,24 @@
     done
 }
 
+function configure_cinder_volume_upload {
+    # Expect CINDER_UPLOAD_OPTIMIZED_BACKENDS to be a list of backends similar
+    # to CINDER_ENABLED_BACKENDS, i.e. TYPE:NAME entries where NAME is the
+    # backend specific configuration stanza in cinder.conf.
+    local be be_name
+    for be in ${CINDER_UPLOAD_OPTIMIZED_BACKENDS//,/ }; do
+        be_name=${be##*:}
+
+        iniset $CINDER_CONF $be_name image_upload_use_cinder_backend $CINDER_UPLOAD_OPTIMIZED
+        iniset $CINDER_CONF $be_name image_upload_use_internal_tenant $CINDER_UPLOAD_INTERNAL_TENANT
+    done
+}
+
+function init_cinder_service_user_conf {
+    configure_keystone_authtoken_middleware $CINDER_CONF cinder service_user
+    iniset $CINDER_CONF service_user send_service_user_token True
+    iniset $CINDER_CONF service_user auth_strategy keystone
+}
 
 # Restore xtrace
 $_XTRACE_CINDER
diff --git a/lib/cinder_backends/fake_gate b/lib/cinder_backends/fake_gate
index 3ffd9a6..3b9f1d1 100644
--- a/lib/cinder_backends/fake_gate
+++ b/lib/cinder_backends/fake_gate
@@ -50,7 +50,7 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.tests.fake_driver.FakeGateDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
-    iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
 
     if [[ "$CINDER_VOLUME_CLEAR" == "non" ]]; then
diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm
index e03ef14..4286511 100644
--- a/lib/cinder_backends/lvm
+++ b/lib/cinder_backends/lvm
@@ -50,7 +50,10 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver"
     iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name
-    iniset $CINDER_CONF $be_name target_helper "$CINDER_ISCSI_HELPER"
+    iniset $CINDER_CONF $be_name target_helper "$CINDER_TARGET_HELPER"
+    iniset $CINDER_CONF $be_name target_protocol "$CINDER_TARGET_PROTOCOL"
+    iniset $CINDER_CONF $be_name target_port "$CINDER_TARGET_PORT"
+    iniset $CINDER_CONF $be_name target_prefix "$CINDER_TARGET_PREFIX"
     iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE"
     iniset $CINDER_CONF $be_name volume_clear "$CINDER_VOLUME_CLEAR"
 }
diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs
index 89a37a1..f3fcbef 100644
--- a/lib/cinder_backends/nfs
+++ b/lib/cinder_backends/nfs
@@ -32,6 +32,15 @@
     iniset $CINDER_CONF $be_name volume_backend_name $be_name
     iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.nfs.NfsDriver"
     iniset $CINDER_CONF $be_name nfs_shares_config "$CINDER_CONF_DIR/nfs-shares-$be_name.conf"
+    iniset $CINDER_CONF $be_name nas_host localhost
+    iniset $CINDER_CONF $be_name nas_share_path ${NFS_EXPORT_DIR}
+    iniset $CINDER_CONF $be_name nas_secure_file_operations \
+        ${NFS_SECURE_FILE_OPERATIONS}
+    iniset $CINDER_CONF $be_name nas_secure_file_permissions \
+        ${NFS_SECURE_FILE_PERMISSIONS}
+
+    # NFS snapshot support is currently opt-in only.
+    iniset $CINDER_CONF $be_name nfs_snapshot_support True
 
     echo "$CINDER_NFS_SERVERPATH" | tee "$CINDER_CONF_DIR/nfs-shares-$be_name.conf"
 }
diff --git a/lib/cinder_backups/ceph b/lib/cinder_backups/ceph
index e4003c0..4b18049 100644
--- a/lib/cinder_backups/ceph
+++ b/lib/cinder_backups/ceph
@@ -26,12 +26,15 @@
 
 
 function configure_cinder_backup_ceph {
-    sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
-    if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
-        sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+    # Execute this part only when cephadm is not used
+    if [[ "$CEPHADM_DEPLOY" = "False" ]]; then
+        sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_BAK_CEPH_POOL} ${CINDER_BAK_CEPH_POOL_PG} ${CINDER_BAK_CEPH_POOL_PGP}
+        if [[ "$REMOTE_CEPH" = "False" && "$CEPH_REPLICAS" -ne 1 ]]; then
+            sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${CINDER_BAK_CEPH_POOL} crush_ruleset ${RULE_ID}
+        fi
+        sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
+        sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
     fi
-    sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_BAK_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_BAK_CEPH_POOL}, allow rwx pool=${CINDER_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
-    sudo chown $(whoami):$(whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_BAK_CEPH_USER}.keyring
 
     iniset $CINDER_CONF DEFAULT backup_driver "cinder.backup.drivers.ceph.CephBackupDriver"
     iniset $CINDER_CONF DEFAULT backup_ceph_conf "$CEPH_CONF_FILE"
diff --git a/lib/databases/mysql b/lib/databases/mysql
index 0f45273..e069e12 100644
--- a/lib/databases/mysql
+++ b/lib/databases/mysql
@@ -20,13 +20,7 @@
     MYSQL_SERVICE_NAME=mysql
     if is_fedora && ! is_oraclelinux; then
         MYSQL_SERVICE_NAME=mariadb
-    elif is_suse && systemctl list-unit-files | grep -q 'mariadb\.service'; then
-        # Older mariadb packages on SLES 12 provided mysql.service.  The
-        # newer ones on SLES 12 and 15 use mariadb.service; they also
-        # provide a mysql.service symlink for backwards-compatibility, but
-        # let's not rely on that.
-        MYSQL_SERVICE_NAME=mariadb
-    elif [[ "$DISTRO" == "bullseye" ]]; then
+    elif [[ "$DISTRO" =~ bookworm|bullseye ]]; then
         MYSQL_SERVICE_NAME=mariadb
     fi
 fi
@@ -54,7 +48,7 @@
     elif is_oraclelinux; then
         uninstall_package mysql-community-server
         sudo rm -rf /var/lib/mysql
-    elif is_suse || is_fedora; then
+    elif is_fedora; then
         uninstall_package mariadb-server
         sudo rm -rf /var/lib/mysql
     else
@@ -69,12 +63,12 @@
 }
 
 function configure_database_mysql {
-    local my_conf mysql slow_log
+    local my_conf mysql slow_log my_client_conf
     echo_summary "Configuring and starting MySQL"
 
     if is_ubuntu; then
         my_conf=/etc/mysql/my.cnf
-    elif is_suse || is_oraclelinux; then
+    elif is_oraclelinux; then
         my_conf=/etc/my.cnf
     elif is_fedora; then
         my_conf=/etc/my.cnf
@@ -86,11 +80,20 @@
         exit_distro_not_supported "mysql configuration"
     fi
 
+    # Set fips mode on
+    if is_ubuntu; then
+        if is_fips_enabled; then
+            my_client_conf=/etc/mysql/mysql.conf.d/mysql.cnf
+            iniset -sudo $my_client_conf mysql ssl-fips-mode "on"
+            iniset -sudo $my_conf mysqld ssl-fips-mode "on"
+        fi
+    fi
+
     # Change bind-address from localhost (127.0.0.1) to any (::)
     iniset -sudo $my_conf mysqld bind-address "$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)"
 
     # (Re)Start mysql-server
-    if is_fedora || is_suse; then
+    if is_fedora; then
         # service is not started by default
         start_service $MYSQL_SERVICE_NAME
     elif is_ubuntu; then
@@ -100,8 +103,13 @@
 
     # Set the root password - only works the first time. For Ubuntu, we already
     # did that with debconf before installing the package, but we still try,
-    # because the package might have been installed already.
-    sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+    # because the package might have been installed already. We don't do this
+    # for Ubuntu 22.04 (jammy) because the authorization model changed in
+    # version 10.4 of mariadb. See
+    # https://mariadb.org/authentication-in-mariadb-10-4/
+    if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+        sudo mysqladmin -u root password $DATABASE_PASSWORD || true
+    fi
 
     # In case of Mariadb, giving hostname in arguments causes permission
     # problems as it expects connection through socket
@@ -114,14 +122,23 @@
     # In mariadb e.g. on Ubuntu socket plugin is used for authentication
     # as root so it works only as sudo. To restore old "mysql like" behaviour,
     # we need to change auth plugin for root user
-    if is_ubuntu && [[ "$DISTRO" != "bullseye" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
-        sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
-        sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+    # TODO(frickler): simplify this logic
+    if is_ubuntu && [[ ! "$DISTRO" =~ bookworm|bullseye ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]; then
+        if [[ "$DISTRO" == "jammy" ]]; then
+            # For Ubuntu 22.04 (jammy) we follow the model outlined in
+            # https://mariadb.org/authentication-in-mariadb-10-4/
+            sudo mysql -e "ALTER USER $DATABASE_USER@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD('$DATABASE_PASSWORD');"
+        else
+            sudo mysql $cmd_args -e "UPDATE mysql.user SET plugin='' WHERE user='$DATABASE_USER' AND host='localhost';"
+            sudo mysql $cmd_args -e "FLUSH PRIVILEGES;"
+        fi
     fi
-    # Create DB user if it does not already exist
-    sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
-    # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases:
-    sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
+    if ! (is_ubuntu && [[ "$DISTRO" == "jammy" ]] && [ "$MYSQL_SERVICE_NAME" == "mariadb" ]); then
+        # Create DB user if it does not already exist
+        sudo mysql $cmd_args -e "CREATE USER IF NOT EXISTS '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';"
+        # Update the DB to give user '$DATABASE_USER'@'%' full control of all databases:
+        sudo mysql $cmd_args -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%';"
+    fi
 
     # Now update ``my.cnf`` for some local needs and restart the mysql service
 
@@ -150,6 +167,29 @@
         iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1
     fi
 
+    if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+        echo "enabling MySQL performance counting"
+
+        # Install our sqlalchemy plugin
+        pip_install ${TOP_DIR}/tools/dbcounter
+
+        # Create our stats database for accounting
+        recreate_database stats
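+        # ENGINE MEMORY keeps the per-query counters in RAM: updates are cheap
+        # but the table contents do not survive a mysqld restart.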
+        mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST -e \
+              "CREATE TABLE queries (db VARCHAR(32), op VARCHAR(32),
+                count INT, PRIMARY KEY (db, op)) ENGINE MEMORY" stats
+    fi
+
+    if [[ "$MYSQL_REDUCE_MEMORY" == "True" ]]; then
+        iniset -sudo $my_conf mysqld read_buffer_size 64K
+        iniset -sudo $my_conf mysqld innodb_buffer_pool_size 16M
+        iniset -sudo $my_conf mysqld thread_stack 192K
+        iniset -sudo $my_conf mysqld thread_cache_size 8
+        iniset -sudo $my_conf mysqld tmp_table_size 8M
+        iniset -sudo $my_conf mysqld sort_buffer_size 8M
+        iniset -sudo $my_conf mysqld max_allowed_packet 8M
+    fi
+
     restart_service $MYSQL_SERVICE_NAME
 }
 
@@ -186,9 +226,6 @@
         elif is_fedora; then
             install_package mariadb-server mariadb-devel mariadb
             sudo systemctl enable $MYSQL_SERVICE_NAME
-        elif is_suse; then
-            install_package mariadb-server
-            sudo systemctl enable $MYSQL_SERVICE_NAME
         elif is_ubuntu; then
             install_package $MYSQL_SERVICE_NAME-server
         else
@@ -209,7 +246,17 @@
 
 function database_connection_url_mysql {
     local db=$1
-    echo "$BASE_SQL_CONN/$db?charset=utf8"
+    local plugin
+
+    # NOTE(danms): We don't enable perf on subnodes yet because the
+    # plugin is not installed there
+    if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" ]]; then
+        if is_service_enabled mysql; then
+            plugin="&plugin=dbcounter"
+        fi
+    fi
+
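+    # Example result (illustrative values):
+    #   mysql+pymysql://root:secret@127.0.0.1/nova?charset=utf8&plugin=dbcounter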
+    echo "$BASE_SQL_CONN/$db?charset=utf8$plugin"
 }
 
 
diff --git a/lib/databases/postgresql b/lib/databases/postgresql
index 4f0a5a0..b21418b 100644
--- a/lib/databases/postgresql
+++ b/lib/databases/postgresql
@@ -32,7 +32,7 @@
         # Get ruthless with mysql
         apt_get purge -y postgresql*
         return
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
         uninstall_package postgresql-server
     else
         return
@@ -66,11 +66,6 @@
         pg_dir=`find /etc/postgresql -name pg_hba.conf|xargs dirname`
         pg_hba=$pg_dir/pg_hba.conf
         pg_conf=$pg_dir/postgresql.conf
-    elif is_suse; then
-        pg_hba=/var/lib/pgsql/data/pg_hba.conf
-        pg_conf=/var/lib/pgsql/data/postgresql.conf
-        # initdb is called when postgresql is first started
-        sudo [ -e $pg_hba ] || start_service postgresql
     else
         exit_distro_not_supported "postgresql configuration"
     fi
@@ -107,7 +102,7 @@
     if [[ "$INSTALL_DATABASE_SERVER_PACKAGES" == "True" ]]; then
         if is_ubuntu; then
             install_package postgresql
-        elif is_fedora || is_suse; then
+        elif is_fedora; then
             install_package postgresql-server
             if is_fedora; then
                 sudo systemctl enable postgresql
diff --git a/lib/dstat b/lib/dstat
index eb03ae0..870c901 100644
--- a/lib/dstat
+++ b/lib/dstat
@@ -40,12 +40,18 @@
     if is_service_enabled peakmem_tracker; then
         die $LINENO "The peakmem_tracker service has been removed, use memory_tracker instead"
     fi
+
+    # To enable file_tracker add:
+    #    enable_service file_tracker
+    # to your localrc
+    run_process file_tracker "$TOP_DIR/tools/file_tracker.sh"
 }
 
 # stop_dstat() stop dstat process
 function stop_dstat {
     stop_process dstat
     stop_process memory_tracker
+    stop_process file_tracker
 }
 
 # Restore xtrace
diff --git a/lib/etcd3 b/lib/etcd3
index 4f3a7a4..0d22de8 100644
--- a/lib/etcd3
+++ b/lib/etcd3
@@ -51,7 +51,7 @@
     fi
     cmd+=" --listen-client-urls http://$SERVICE_HOST:$ETCD_PORT"
     if [ "$ENABLE_DEBUG_LOG_LEVEL" == "True" ]; then
-        cmd+=" --debug"
+        cmd+=" --log-level=debug"
     fi
 
     local unitfile="$SYSTEMD_DIR/$ETCD_SYSTEMD_SERVICE"
diff --git a/lib/glance b/lib/glance
index b94c06d..2746871 100644
--- a/lib/glance
+++ b/lib/glance
@@ -47,9 +47,8 @@
 # from CINDER_ENABLED_BACKENDS
 GLANCE_CINDER_DEFAULT_BACKEND=${GLANCE_CINDER_DEFAULT_BACKEND:-lvmdriver-1}
 GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/local/etc/glance
-# NOTE (abhishekk): For opensuse data files are stored in different directory
-if is_opensuse; then
-    GLANCE_STORE_ROOTWRAP_BASE_DIR=/usr/etc/glance
+if [[ "$GLOBAL_VENV" == "True" ]] ; then
+    GLANCE_STORE_ROOTWRAP_BASE_DIR=${DEVSTACK_VENV}/etc/glance
 fi
 # When Cinder is used as a glance store, you can optionally configure cinder to
 # optimize bootable volume creation by allowing volumes to be cloned directly
@@ -76,6 +75,7 @@
 GLANCE_DEFAULT_BACKEND=${GLANCE_DEFAULT_BACKEND:-fast}
 
 GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache}
+GLANCE_CACHE_DRIVER=${GLANCE_CACHE_DRIVER:-centralized_db}
 
 # Full Glance functionality requires running in standalone mode. If we are
 # not in uwsgi mode, then we are standalone, otherwise allow separate control.
@@ -99,10 +99,10 @@
 GLANCE_ENABLE_QUOTAS=$(trueorfalse True GLANCE_ENABLE_QUOTAS)
 
 # Flag to set the oslo_policy.enforce_scope. This is used to switch
-# the Image API policies to start checking the scope of token. By Default,
-# this flag is False.
+# the Image API policies to scope checking and the new defaults. By default,
+# this flag is True; set it to False to disable both.
 # For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
-GLANCE_ENFORCE_SCOPE=$(trueorfalse False GLANCE_ENFORCE_SCOPE)
+GLANCE_ENFORCE_SCOPE=$(trueorfalse True GLANCE_ENFORCE_SCOPE)
 
 GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance}
 GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs
@@ -168,6 +168,7 @@
         # Cleanup reserved stores directories
         sudo rm -rf $GLANCE_STAGING_DIR $GLANCE_TASKS_DIR
     fi
+    remove_uwsgi_config "$GLANCE_UWSGI_CONF" "glance-wsgi-api"
 }
 
 # Set multiple cinder store related config options for each of the cinder store
@@ -330,6 +331,7 @@
     iniset $GLANCE_API_CONF database connection $dburl
     iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+    iniset $GLANCE_API_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER
     iniset $GLANCE_API_CONF oslo_concurrency lock_path $GLANCE_LOCK_DIR
     iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement
     configure_keystone_authtoken_middleware $GLANCE_API_CONF glance
@@ -393,6 +395,7 @@
     iniset $GLANCE_CACHE_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
     iniset $GLANCE_CACHE_CONF DEFAULT use_syslog $SYSLOG
     iniset $GLANCE_CACHE_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/
+    iniset $GLANCE_CACHE_CONF DEFAULT image_cache_driver $GLANCE_CACHE_DRIVER
     iniset $GLANCE_CACHE_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
     iniset $GLANCE_CACHE_CONF DEFAULT admin_tenant_name $SERVICE_PROJECT_NAME
     iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance
@@ -430,12 +433,17 @@
         iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS
         iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT
         iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS"
+        iniset $GLANCE_API_CONF DEFAULT worker_self_reference_url $GLANCE_URL
     fi
 
-    if [[ "$GLANCE_ENFORCE_SCOPE" == True ]] ; then
+    if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $GLANCE_API_CONF oslo_policy enforce_scope true
         iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults true
         iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac true
+    else
+        iniset $GLANCE_API_CONF oslo_policy enforce_scope false
+        iniset $GLANCE_API_CONF oslo_policy enforce_new_defaults false
+        iniset $GLANCE_API_CONF DEFAULT enforce_secure_rbac false
     fi
 }
 
@@ -540,7 +548,7 @@
 # start_glance_remote_clone() - Clone the regular glance api worker
 function start_glance_remote_clone {
     local glance_remote_conf_dir glance_remote_port remote_data
-    local glance_remote_uwsgi
+    local glance_remote_uwsgi venv
 
     glance_remote_conf_dir="$(glance_remote_conf "")"
     glance_remote_port=$(get_random_port)
@@ -578,12 +586,16 @@
     # We need to create the systemd service for the clone, but then
     # change it to include an Environment line to point the WSGI app
     # at the alternate config directory.
+    if [[ "$GLOBAL_VENV" == True ]]; then
+        venv="--venv $DEVSTACK_VENV"
+    fi
     write_uwsgi_user_unit_file devstack@g-api-r.service "$(which uwsgi) \
                                --procname-prefix \
                                glance-api-remote \
-                               --ini $glance_remote_uwsgi" \
+                               --ini $glance_remote_uwsgi \
+                               $venv" \
                                "" "$STACK_USER"
-    iniset -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
+    iniadd -sudo ${SYSTEMD_DIR}/devstack@g-api-r.service \
            "Service" "Environment" \
            "OS_GLANCE_CONFIG_DIR=$glance_remote_conf_dir"
 
diff --git a/lib/horizon b/lib/horizon
index b2bf7bc..7c0d443 100644
--- a/lib/horizon
+++ b/lib/horizon
@@ -109,12 +109,21 @@
         _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT "True"
     fi
 
+    if is_service_enabled c-bak; then
+        _horizon_config_set $local_settings OPENSTACK_CINDER_FEATURES enable_backup "True"
+    fi
+
     # Create an empty directory that apache uses as docroot
     sudo mkdir -p $HORIZON_DIR/.blackhole
 
     local horizon_conf
     horizon_conf=$(apache_site_config_for horizon)
 
+    local wsgi_venv_config=""
+    if [[ "$GLOBAL_VENV" == "True" ]] ; then
+        wsgi_venv_config="WSGIPythonHome $DEVSTACK_VENV"
+    fi
+
     # Configure apache to run horizon
     # Set up the django horizon application to serve via apache/wsgi
     sudo sh -c "sed -e \"
@@ -124,12 +133,13 @@
         s,%APACHE_NAME%,$APACHE_NAME,g;
         s,%DEST%,$DEST,g;
         s,%WEBROOT%,$HORIZON_APACHE_ROOT,g;
+        s,%WSGIPYTHONHOME%,$wsgi_venv_config,g;
     \" $FILES/apache-horizon.template >$horizon_conf"
 
     if is_ubuntu; then
         disable_apache_site 000-default
         sudo touch $horizon_conf
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
         : # nothing to do
     else
         exit_distro_not_supported "horizon apache configuration"
@@ -163,6 +173,10 @@
     # Apache installation, because we mark it NOPRIME
     install_apache_wsgi
 
+    # Install the memcache library so that horizon can use memcached as its
+    # cache backend
+    pip_install_gr pymemcache
+
     git_clone $HORIZON_REPO $HORIZON_DIR $HORIZON_BRANCH
 }
 
diff --git a/lib/host b/lib/host
new file mode 100644
index 0000000..a812c39
--- /dev/null
+++ b/lib/host
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+# Kernel Samepage Merging (KSM)
+# -----------------------------
+
+# Processes that mark their memory as mergeable can share identical memory
+# pages if KSM is enabled. This is particularly useful for nova + libvirt
+# backends but any other setup that marks its memory as mergeable can take
+# advantage. The drawback is higher CPU load; however, we tend to be memory
+# bound rather than CPU bound, so KSM is enabled by default, with an opt-out
+# for those to whom CPU time is more important.
+ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
+ENABLE_KSMTUNED=$(trueorfalse True ENABLE_KSMTUNED)
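+# To opt out, set ENABLE_KSM=False (and ENABLE_KSMTUNED=False) in local.conf.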
+function configure_ksm {
+    if [[ $ENABLE_KSMTUNED == "True" ]] ; then
+        install_package "ksmtuned"
+    fi
+    if [[ -f /sys/kernel/mm/ksm/run ]] ; then
+        echo $(bool_to_int ENABLE_KSM) | sudo tee /sys/kernel/mm/ksm/run
+    fi
+}
+
+# Compressed swap (ZSWAP)
+#------------------------
+
+# as noted in the kernel docs https://docs.kernel.org/admin-guide/mm/zswap.html
+# Zswap is a lightweight compressed cache for swap pages.
+# It takes pages that are in the process of being swapped out and attempts
+# to compress them into a dynamically allocated RAM-based memory pool.
+# zswap basically trades CPU cycles for potentially reduced swap I/O.
+# This trade-off can also result in a significant performance improvement
+# if reads from the compressed cache are faster than reads from a swap device.
+
+ENABLE_ZSWAP=$(trueorfalse False ENABLE_ZSWAP)
+# lz4 is very fast although it does not have the best compression
+# zstd has much better compression but more latency
+ZSWAP_COMPRESSOR=${ZSWAP_COMPRESSOR:="lz4"}
+ZSWAP_ZPOOL=${ZSWAP_ZPOOL:="z3fold"}
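+# z3fold packs up to three compressed pages per physical page; zbud (two per
+# page) and zsmalloc are the other in-tree zpool allocators.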
+function configure_zswap {
+    if [[ $ENABLE_ZSWAP == "True" ]] ; then
+        # CentOS 9 Stream seems to only support enabling zswap, not runtime
+        # tuning, so don't try to choose better defaults on CentOS.
+        if is_ubuntu; then
+            echo ${ZSWAP_COMPRESSOR} | sudo tee /sys/module/zswap/parameters/compressor
+            echo ${ZSWAP_ZPOOL} | sudo tee /sys/module/zswap/parameters/zpool
+        fi
+        echo 1 | sudo tee /sys/module/zswap/parameters/enabled
+        # print current zswap kernel config
+        sudo grep -R . /sys/module/zswap/parameters || /bin/true
+    fi
+}
+
+ENABLE_SYSCTL_MEM_TUNING=$(trueorfalse False ENABLE_SYSCTL_MEM_TUNING)
+function configure_sysctl_mem_parmaters {
+    if [[ $ENABLE_SYSCTL_MEM_TUNING == "True" ]] ; then
+        # defer write when memory is available
+        sudo sysctl -w vm.dirty_ratio=60
+        sudo sysctl -w vm.dirty_background_ratio=10
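+        # values below 100 make the kernel keep dentry/inode caches longer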
+        sudo sysctl -w vm.vfs_cache_pressure=50
+        # assume swap is compressed, so on new kernels give it equal priority
+        # to the (uncompressed) page cache. On kernels < 5.8 the max is 100,
+        # not 200, so 100 will strongly prefer swapping there.
+        sudo sysctl -w vm.swappiness=100
+        sudo grep -R . /proc/sys/vm/  || /bin/true
+    fi
+}
+
+function configure_host_mem {
+    configure_zswap
+    configure_ksm
+    configure_sysctl_mem_parmaters
+}
+
+ENABLE_SYSCTL_NET_TUNING=$(trueorfalse False ENABLE_SYSCTL_NET_TUNING)
+function configure_sysctl_net_parmaters {
+    if [[ $ENABLE_SYSCTL_NET_TUNING == "True" ]] ; then
+        # detect dead TCP connections after 120 seconds
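+        # (60s idle + 6 probes at 10s intervals ~= 120s)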
+        sudo sysctl -w net.ipv4.tcp_keepalive_time=60
+        sudo sysctl -w net.ipv4.tcp_keepalive_intvl=10
+        sudo sysctl -w net.ipv4.tcp_keepalive_probes=6
+        # reduce network latency for new connections
+        sudo sysctl -w net.ipv4.tcp_fastopen=3
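+        # (3 = bit 0 client + bit 1 server, i.e. enable TFO for both
+        # outgoing connections and listening sockets)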
+        # print tcp options
+        sudo grep -R . /proc/sys/net/ipv4/tcp* || /bin/true
+        # disable qos by default
+        sudo sysctl -w net.core.default_qdisc=pfifo_fast
+    fi
+}
+
+function configure_host_net {
+    configure_sysctl_net_parmaters
+}
+
+function tune_host {
+    configure_host_mem
+    configure_host_net
+}
diff --git a/lib/infra b/lib/infra
index b983f2b..2aad003 100644
--- a/lib/infra
+++ b/lib/infra
@@ -31,7 +31,7 @@
     local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv"
     [ ! -d $PIP_VIRTUAL_ENV ] && ${VIRTUALENV_CMD} $PIP_VIRTUAL_ENV
     # We don't care about testing git pbr in the requirements venv.
-    PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr
+    PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr setuptools
     PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR
 
     # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped
diff --git a/lib/keystone b/lib/keystone
index a4c8a52..7d6b05f 100644
--- a/lib/keystone
+++ b/lib/keystone
@@ -150,7 +150,7 @@
         sudo rm -f $(apache_site_config_for keystone)
     else
         stop_process "keystone"
-        remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "$KEYSTONE_PUBLIC_UWSGI"
+        remove_uwsgi_config "$KEYSTONE_PUBLIC_UWSGI_CONF" "keystone-wsgi-public"
         sudo rm -f $(apache_site_config_for keystone-wsgi-public)
     fi
 }
@@ -265,10 +265,15 @@
         iniset $KEYSTONE_CONF security_compliance lockout_duration $KEYSTONE_LOCKOUT_DURATION
         iniset $KEYSTONE_CONF security_compliance unique_last_password_count $KEYSTONE_UNIQUE_LAST_PASSWORD_COUNT
     fi
-    if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+
+    iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+
+    if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $KEYSTONE_CONF oslo_policy enforce_scope true
         iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true
-        iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml
+    else
+        iniset $KEYSTONE_CONF oslo_policy enforce_scope false
+        iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults false
     fi
 }
 
diff --git a/lib/ldap b/lib/ldap
index ea5faa1..b0195db 100644
--- a/lib/ldap
+++ b/lib/ldap
@@ -39,13 +39,6 @@
     LDAP_OLCDB_NUMBER=2
     LDAP_OLCDB_TYPE=hdb
     LDAP_ROOTPW_COMMAND=add
-elif is_suse; then
-    # SUSE has slappasswd in /usr/sbin/
-    PATH=$PATH:/usr/sbin/
-    LDAP_OLCDB_NUMBER=1
-    LDAP_OLCDB_TYPE=hdb
-    LDAP_ROOTPW_COMMAND=add
-    LDAP_SERVICE_NAME=ldap
 fi
 
 
@@ -76,8 +69,6 @@
         sudo rm -rf /etc/ldap/ldap.conf /var/lib/ldap
     elif is_fedora; then
         sudo rm -rf /etc/openldap /var/lib/ldap
-    elif is_suse; then
-        sudo rm -rf /var/lib/ldap
     fi
 }
 
@@ -126,11 +117,6 @@
         configure_ldap
     elif is_fedora; then
         start_ldap
-    elif is_suse; then
-        _ldap_varsubst $FILES/ldap/suse-base-config.ldif.in >$tmp_ldap_dir/suse-base-config.ldif
-        sudo slapadd -F /etc/openldap/slapd.d/ -bcn=config -l $tmp_ldap_dir/suse-base-config.ldif
-        sudo sed -i '/^OPENLDAP_START_LDAPI=/s/"no"/"yes"/g' /etc/sysconfig/openldap
-        start_ldap
     fi
 
     echo "LDAP_PASSWORD is $LDAP_PASSWORD"
diff --git a/lib/lvm b/lib/lvm
index d3f6bf1..b7e84d9 100644
--- a/lib/lvm
+++ b/lib/lvm
@@ -129,19 +129,25 @@
     local vg=$1
     local size=$2
 
-    # Start the tgtd service on Fedora and SUSE if tgtadm is used
-    if  is_fedora || is_suse  && [[ "$CINDER_ISCSI_HELPER" = "tgtadm" ]]; then
+    # Start the tgtd service on Fedora if tgtadm is used
+    if  is_fedora; then
         start_service tgtd
     fi
 
     # Start with a clean volume group
     _create_lvm_volume_group $vg $size
 
-    # Remove iscsi targets
-    if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then
-        sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
-    else
-        sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
+    if is_service_enabled cinder; then
+        # Remove iscsi targets
+        if [ "$CINDER_TARGET_HELPER" = "lioadm" ]; then
+            sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete
+        elif [ "$CINDER_TARGET_HELPER" = "tgtadm" ]; then
+            sudo tgtadm --op show --mode target | awk '/Target/ {print $3}' | sudo xargs -r -n1 tgt-admin --delete
+        elif [ "$CINDER_TARGET_HELPER" = "nvmet" ]; then
+            # If we don't disconnect everything vgremove will block
+            sudo nvme disconnect-all
+            sudo nvmetcli clear
+        fi
     fi
     _clean_lvm_volume_group $vg
 }
@@ -194,7 +200,7 @@
     filter_string=$filter_string$filter_suffix
 
     clean_lvm_filter
-    sudo sed -i "/# global_filter = \[*\]/a\    $global_filter$filter_string" /etc/lvm/lvm.conf
+    sudo sed -i "/# global_filter = \[.*\]/a\        $filter_string" /etc/lvm/lvm.conf
     echo_summary "set lvm.conf device global_filter to: $filter_string"
 }
 
diff --git a/lib/neutron b/lib/neutron
index e7719d4..bcef8a5 100644
--- a/lib/neutron
+++ b/lib/neutron
@@ -1,122 +1,327 @@
 #!/bin/bash
 #
 # lib/neutron
-# Install and start **Neutron** network services
+# functions - functions specific to neutron
 
 # Dependencies:
-#
 # ``functions`` file
 # ``DEST`` must be defined
+# ``STACK_USER`` must be defined
 
 # ``stack.sh`` calls the entry points in this order:
 #
-# - is_XXXX_enabled
-# - install_XXXX
-# - configure_XXXX
-# - init_XXXX
-# - start_XXXX
-# - stop_XXXX
-# - cleanup_XXXX
+# - install_neutron_agent_packages
+# - install_neutronclient
+# - install_neutron
+# - install_neutron_third_party
+# - configure_neutron
+# - init_neutron
+# - configure_neutron_third_party
+# - init_neutron_third_party
+# - start_neutron_third_party
+# - create_nova_conf_neutron
+# - configure_neutron_after_post_config
+# - start_neutron_service_and_check
+# - check_neutron_third_party_integration
+# - start_neutron_agents
+# - create_neutron_initial_network
+#
+# ``unstack.sh`` calls the entry points in this order:
+#
+# - stop_neutron
+# - stop_neutron_third_party
+# - cleanup_neutron
 
-# Save trace setting
-XTRACE=$(set +o | grep xtrace)
-set +o xtrace
+# Functions in lib/neutron are classified into the following categories:
+#
+# - entry points (called from stack.sh or unstack.sh)
+# - internal functions
+# - neutron exercises
+# - 3rd party programs
 
-# Defaults
+
+# Neutron Networking
+# ------------------
+
+# Make sure that neutron is enabled in ``ENABLED_SERVICES``.  If you want
+# to run Neutron on this host, make sure that q-svc is also in
+# ``ENABLED_SERVICES``.
+#
+# See "Neutron Network Configuration" below for additional variables
+# that must be set in localrc for connectivity across hosts with
+# Neutron.
+
+# Settings
 # --------
 
+
+# Neutron Network Configuration
+# -----------------------------
+
+if is_service_enabled tls-proxy; then
+    Q_PROTOCOL="https"
+fi
+
+
 # Set up default directories
 GITDIR["python-neutronclient"]=$DEST/python-neutronclient
 
+
+NEUTRON_DIR=$DEST/neutron
+NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
+
+# Support entry points installation of console scripts
+if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
+    NEUTRON_BIN_DIR=$NEUTRON_DIR/bin
+else
+    NEUTRON_BIN_DIR=$(get_python_exec_prefix)
+fi
+
+NEUTRON_CONF_DIR=/etc/neutron
+NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
+export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
+
 # NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
 # - False (default) : Run neutron under Eventlet
 # - True : Run neutron under uwsgi
 # TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
 # enough
 NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
-NEUTRON_AGENT=${NEUTRON_AGENT:-openvswitch}
-NEUTRON_DIR=$DEST/neutron
+
+NEUTRON_UWSGI=neutron.wsgi.api:application
+NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
 
 # If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
 # and "enforce_new_defaults" to True in the Neutron's config to enforce usage
-# of the new RBAC policies and scopes.
-NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
+# of the new RBAC policies and scopes. Set it to False if you do not
+# want to run Neutron with new RBAC.
+NEUTRON_ENFORCE_SCOPE=$(trueorfalse True NEUTRON_ENFORCE_SCOPE)
 
-NEUTRON_DISTRIBUTED_ROUTING=$(trueorfalse False NEUTRON_DISTRIBUTED_ROUTING)
+# Agent binaries.  Note, binary paths for other agents are set in per-service
+# scripts in lib/neutron_plugins/services/
+AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
+AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
+AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
+
+# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and
+# loaded from per-plugin  scripts in lib/neutron_plugins/
+Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
+# NOTE(slaweq): NEUTRON_DHCP_CONF is used e.g. in neutron repository,
+# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_DHCP_CONF=$Q_DHCP_CONF_FILE
+Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
+# NOTE(slaweq): NEUTRON_L3_CONF is used e.g. in neutron repository,
+# it was previously defined in the lib/neutron module which is now deleted.
+NEUTRON_L3_CONF=$Q_L3_CONF_FILE
+Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
+
+# Default name for Neutron database
+Q_DB_NAME=${Q_DB_NAME:-neutron}
+# Default Neutron Plugin
+Q_PLUGIN=${Q_PLUGIN:-ml2}
+# Default Neutron Port
+Q_PORT=${Q_PORT:-9696}
+# Default Neutron Internal Port when using TLS proxy
+Q_PORT_INT=${Q_PORT_INT:-19696}
+# Default Neutron Host
+Q_HOST=${Q_HOST:-$SERVICE_HOST}
+# Default protocol
+Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
+# Default listen address
+Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
+# Default admin username
+Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
+# Default auth strategy
+Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
+# RHEL's support for namespaces requires using veths with ovs
+Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
+Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
+Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
+# Meta data IP
+Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
+# Allow Overlapping IP among subnets
+Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
+Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
+Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
+VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
+VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
+
+# Allow to skip stopping of OVN services
+SKIP_STOP_OVN=${SKIP_STOP_OVN:-False}
+
+# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
+# /etc/neutron is assumed by many devstack plugins.  Do not change.
+_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
+
+# The name of the service in the endpoint URL
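+# Note: the assignment below uses ${VAR-default} rather than ${VAR:-default},
+# so an explicitly empty value is preserved unless the uwsgi check underneath
+# restores the "networking" default.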
+NEUTRON_ENDPOINT_SERVICE_NAME=${NEUTRON_ENDPOINT_SERVICE_NAME-"networking"}
+if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" && -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+    NEUTRON_ENDPOINT_SERVICE_NAME="networking"
+fi
+
+# Source install libraries
+ALEMBIC_REPO=${ALEMBIC_REPO:-https://github.com/sqlalchemy/alembic.git}
+ALEMBIC_DIR=${ALEMBIC_DIR:-$DEST/alembic}
+ALEMBIC_BRANCH=${ALEMBIC_BRANCH:-main}
+SQLALCHEMY_REPO=${SQLALCHEMY_REPO:-https://github.com/sqlalchemy/sqlalchemy.git}
+SQLALCHEMY_DIR=${SQLALCHEMY_DIR:-$DEST/sqlalchemy}
+SQLALCHEMY_BRANCH=${SQLALCHEMY_BRANCH:-main}
+
+# List of config file names in addition to the main plugin config file
+# To add additional plugin config files, use ``neutron_server_config_add``
+# utility function.  For example:
+#
+#    ``neutron_server_config_add file1``
+#
+# These config files are relative to ``/etc/neutron``.  The above
+# example would specify ``--config-file /etc/neutron/file1`` for
+# neutron server.
+declare -a -g Q_PLUGIN_EXTRA_CONF_FILES
+
+# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
+declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS
+
+
+Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
+if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+    Q_RR_COMMAND="sudo"
+else
+    NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
+    Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+    fi
+fi
+
+
 # Distributed Virtual Router (DVR) configuration
 # Can be:
-# - ``legacy``          - No DVR functionality
-# - ``dvr_snat``        - Controller or single node DVR
-# - ``dvr``             - Compute node in multi-node DVR
+# - ``legacy``   - No DVR functionality
+# - ``dvr_snat`` - Controller or single node DVR
+# - ``dvr``      - Compute node in multi-node DVR
 # - ``dvr_no_external`` - Compute node in multi-node DVR, no external network
 #
-# Default is 'dvr_snat' since it can handle both DVR and legacy routers
-NEUTRON_DVR_MODE=${NEUTRON_DVR_MODE:-dvr_snat}
-
-NEUTRON_BIN_DIR=$(get_python_exec_prefix)
-NEUTRON_DHCP_BINARY="neutron-dhcp-agent"
-
-NEUTRON_CONF_DIR=/etc/neutron
-NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-NEUTRON_META_CONF=$NEUTRON_CONF_DIR/metadata_agent.ini
-NEUTRON_META_DATA_HOST=${NEUTRON_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
-
-NEUTRON_DHCP_CONF=$NEUTRON_CONF_DIR/dhcp_agent.ini
-NEUTRON_L3_CONF=$NEUTRON_CONF_DIR/l3_agent.ini
-NEUTRON_AGENT_CONF=$NEUTRON_CONF_DIR/
-NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True}
-
-NEUTRON_STATE_PATH=${NEUTRON_STATE_PATH:=$DATA_DIR/neutron}
-
-NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
-
-# By default, use the ML2 plugin
-NEUTRON_CORE_PLUGIN=${NEUTRON_CORE_PLUGIN:-ml2}
-NEUTRON_CORE_PLUGIN_CONF_FILENAME=${NEUTRON_CORE_PLUGIN_CONF_FILENAME:-ml2_conf.ini}
-NEUTRON_CORE_PLUGIN_CONF_PATH=$NEUTRON_CONF_DIR/plugins/$NEUTRON_CORE_PLUGIN
-NEUTRON_CORE_PLUGIN_CONF=$NEUTRON_CORE_PLUGIN_CONF_PATH/$NEUTRON_CORE_PLUGIN_CONF_FILENAME
-
-NEUTRON_METERING_AGENT_CONF_FILENAME=${NEUTRON_METERING_AGENT_CONF_FILENAME:-metering_agent.ini}
-NEUTRON_METERING_AGENT_CONF=$NEUTRON_CONF_DIR/$NEUTRON_METERING_AGENT_CONF_FILENAME
-
-NEUTRON_AGENT_BINARY=${NEUTRON_AGENT_BINARY:-neutron-$NEUTRON_AGENT-agent}
-NEUTRON_L3_BINARY=${NEUTRON_L3_BINARY:-neutron-l3-agent}
-NEUTRON_META_BINARY=${NEUTRON_META_BINARY:-neutron-metadata-agent}
-NEUTRON_METERING_BINARY=${NEUTRON_METERING_BINARY:-neutron-metering-agent}
-
-# Public facing bits
-if is_service_enabled tls-proxy; then
-    NEUTRON_SERVICE_PROTOCOL="https"
+Q_DVR_MODE=${Q_DVR_MODE:-legacy}
+if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
 fi
-NEUTRON_SERVICE_HOST=${NEUTRON_SERVICE_HOST:-$SERVICE_HOST}
-NEUTRON_SERVICE_PORT=${NEUTRON_SERVICE_PORT:-9696}
-NEUTRON_SERVICE_PORT_INT=${NEUTRON_SERVICE_PORT_INT:-19696}
-NEUTRON_SERVICE_PROTOCOL=${NEUTRON_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 
-NEUTRON_AUTH_STRATEGY=${NEUTRON_AUTH_STRATEGY:-keystone}
-NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
-NEUTRON_ROOTWRAP_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-NEUTRON_ROOTWRAP_CMD="$NEUTRON_ROOTWRAP $NEUTRON_ROOTWRAP_CONF_FILE"
-NEUTRON_ROOTWRAP_DAEMON_CMD="$NEUTRON_ROOTWRAP-daemon $NEUTRON_ROOTWRAP_CONF_FILE"
+# Provider Network Configurations
+# --------------------------------
 
-# This is needed because _neutron_ovs_base_configure_l3_agent uses it to create
-# an external network bridge
-PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex}
-PUBLIC_BRIDGE_MTU=${PUBLIC_BRIDGE_MTU:-1500}
+# The following variables control the Neutron ML2 plugins' allocation
+# of tenant networks and availability of provider networks. If these
+# are not configured in ``localrc``, tenant networks will be local to
+# the host (with no remote connectivity), and no physical resources
+# will be available for the allocation of provider networks.
 
-# Network type - default vxlan, however enables vlan based jobs to override
-# using the legacy environment variable as well as a new variable in greater
-# alignment with the naming scheme of this plugin.
-NEUTRON_TENANT_NETWORK_TYPE=${NEUTRON_TENANT_NETWORK_TYPE:-vxlan}
+# To disable tunnels (GRE or VXLAN) for tenant networks, set
+# ENABLE_TENANT_TUNNELS=False in ``local.conf``.
+# GRE tunnels are only supported by the openvswitch agent.
+ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
 
-NEUTRON_TENANT_VLAN_RANGE=${NEUTRON_TENANT_VLAN_RANGE:-${TENANT_VLAN_RANGE:-100:150}}
+# If using GRE, VXLAN or GENEVE tunnels for tenant networks,
+# specify the range of IDs from which tenant networks are
+# allocated. Can be overridden in ``localrc`` if necessary.
+TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
 
-# Physical network for VLAN network usage.
-NEUTRON_PHYSICAL_NETWORK=${NEUTRON_PHYSICAL_NETWORK:-}
+# To use VLANs for tenant networks, set ENABLE_TENANT_VLANS=True in
+# ``localrc``. VLANs are supported by the ML2 plugin and require the
+# additional configuration described below.
+ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
 
+# If using VLANs for tenant networks, set in ``localrc`` to specify
+# the range of VLAN VIDs from which tenant networks are
+# allocated. An external network switch must be configured to
+# trunk these VLANs between hosts for multi-host connectivity.
+#
+# Example: ``TENANT_VLAN_RANGE=1000:1999``
+TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
 
-# Additional neutron api config files
-declare -a -g _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS
+# If using VLANs for tenant networks, or if using flat or VLAN
+# provider networks, set in ``localrc`` to the name of the physical
+# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
+# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
+# agent, as described below.
+#
+# Example: ``PHYSICAL_NETWORK=default``
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
+
+# With the openvswitch agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the OVS bridge to use for the physical network. The
+# bridge will be created if it does not already exist, but a
+# physical interface must be manually added to the bridge as a
+# port for external connectivity.
+#
+# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
+OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
+
+# With the linuxbridge agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the network interface to use for the physical
+# network.
+#
+# Example: ``LB_PHYSICAL_INTERFACE=eth1``
+if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
+    default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
+    die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
+    LB_PHYSICAL_INTERFACE=$default_route_dev
+fi
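+
+# Putting the above together, a ``local.conf`` that enables VLAN tenant
+# networks over a physical network might contain (illustrative values):
+#
+#    ENABLE_TENANT_VLANS=True
+#    TENANT_VLAN_RANGE=1000:1999
+#    PHYSICAL_NETWORK=physnet1
+#    OVS_PHYSICAL_BRIDGE=br-eth1    # or LB_PHYSICAL_INTERFACE=eth1 for linuxbridge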
+
+# With the openvswitch plugin, set to True in ``localrc`` to enable
+# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
+#
+# Example: ``OVS_ENABLE_TUNNELING=True``
+OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
+
+# To have the DHCP agent provide the metadata service when no L3 agent
+# is running, set ENABLE_ISOLATED_METADATA=True in localrc.
+ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False}
+
+# Add a static route as a DHCP option so that requests to 169.254.169.254
+# are routed through the DHCP agent.
+# This option requires ENABLE_ISOLATED_METADATA=True.
+ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False}
+
+# Neutron plugin specific functions
+# ---------------------------------
+
+# Please refer to ``lib/neutron_plugins/README.md`` for details.
+if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
+    source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
+fi
+
+# Agent metering service plugin functions
+# -------------------------------------------
+
+# Hardcoding for 1 service plugin for now
+source $TOP_DIR/lib/neutron_plugins/services/metering
+
+# L3 Service functions
+source $TOP_DIR/lib/neutron_plugins/services/l3
+
+# Additional Neutron service plugins
+source $TOP_DIR/lib/neutron_plugins/services/placement
+source $TOP_DIR/lib/neutron_plugins/services/trunk
+source $TOP_DIR/lib/neutron_plugins/services/qos
+source $TOP_DIR/lib/neutron_plugins/services/segments
+
+# Use security group or not
+if has_neutron_plugin_security_group; then
+    Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
+else
+    Q_USE_SECGROUP=False
+fi
+
+# OVN_BRIDGE_MAPPINGS - ovn-bridge-mappings
+# NOTE(hjensas): Initialize after sourcing neutron_plugins/services/l3,
+# which initializes PUBLIC_BRIDGE.
+OVN_BRIDGE_MAPPINGS=${OVN_BRIDGE_MAPPINGS:-$PHYSICAL_NETWORK:$PUBLIC_BRIDGE}
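+
+# With the defaults above this typically resolves to something like
+# ``public:br-ex`` (illustrative; ``PUBLIC_BRIDGE`` is set by
+# neutron_plugins/services/l3 sourced earlier).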
+
+# Save trace setting
+_XTRACE_NEUTRON=$(set +o | grep xtrace)
+set +o xtrace
+
 
 # Functions
 # ---------
@@ -130,303 +335,216 @@
 }
 
 # Test if any Neutron services are enabled
-# is_neutron_enabled
+# TODO(slaweq): this is not really needed now and we should remove it as
+# soon as it is no longer called from other DevStack plugins, such as the
+# Neutron plugin.
 function is_neutron_legacy_enabled {
-    # first we need to remove all "neutron-" from DISABLED_SERVICES list
-    disabled_services_copy=$(echo $DISABLED_SERVICES | sed 's/neutron-//g')
-    [[ ,${disabled_services_copy} =~ ,"neutron" ]] && return 1
-    [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0
-    return 1
+    return 0
 }
 
-if is_neutron_legacy_enabled; then
-    source $TOP_DIR/lib/neutron-legacy
-fi
-
-# cleanup_neutron() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
-    if is_neutron_ovs_base_plugin; then
-        neutron_ovs_base_cleanup
+function _determine_config_server {
+    if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then
+        if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then
+            deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
+        else
+            die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
+        fi
     fi
-
-    if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-        neutron_lb_cleanup
+    if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then
+        deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated.  Use neutron_server_config_add instead."
     fi
-    # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do
-        sudo ip netns delete ${ns}
+    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
+        _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file)
     done
+
+    local cfg_file
+    local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+    for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do
+        opts+=" --config-file $cfg_file"
+    done
+    echo "$opts"
 }
 
-# configure_root_helper_options() - Configure agent rootwrap helper options
-function configure_root_helper_options {
-    local conffile=$1
-    iniset $conffile agent root_helper "sudo $NEUTRON_ROOTWRAP_CMD"
-    iniset $conffile agent root_helper_daemon "sudo $NEUTRON_ROOTWRAP_DAEMON_CMD"
+function _determine_config_l3 {
+    local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
+    echo "$opts"
 }
 
-# configure_neutron() - Set config files, create data dirs, etc
-function configure_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
-
-    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
-
-    configure_neutron_rootwrap
-
-    mkdir -p $NEUTRON_CORE_PLUGIN_CONF_PATH
-
-    # NOTE(yamamoto): A decomposed plugin should prepare the config file in
-    # its devstack plugin.
-    if [ -f $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample ]; then
-        cp $NEUTRON_DIR/etc/neutron/plugins/$NEUTRON_CORE_PLUGIN/$NEUTRON_CORE_PLUGIN_CONF_FILENAME.sample $NEUTRON_CORE_PLUGIN_CONF
+function _enable_ovn_maintenance {
+    if [[ $Q_AGENT == "ovn" ]]; then
+        enable_service neutron-ovn-maintenance-worker
     fi
+}
 
-    iniset $NEUTRON_CONF database connection `database_connection_url neutron`
-    iniset $NEUTRON_CONF DEFAULT state_path $NEUTRON_STATE_PATH
-    iniset $NEUTRON_CONF oslo_concurrency lock_path $NEUTRON_STATE_PATH/lock
-    iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
+# Run the OVN maintenance worker. Note: $cfg_file_options is expected to
+# be set by the caller (see start_neutron_service_and_check).
+function _run_ovn_maintenance {
+    if [[ $Q_AGENT == "ovn" ]]; then
+        run_process neutron-ovn-maintenance-worker "$NEUTRON_BIN_DIR/neutron-ovn-maintenance-worker $cfg_file_options"
+    fi
+}
 
-    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+function _stop_ovn_maintenance {
+    if [[ $Q_AGENT == "ovn" ]]; then
+        stop_process neutron-ovn-maintenance-worker
+    fi
+}
 
+# For services and agents that require it, dynamically construct a list of
+# --config-file arguments that are passed to the binary.
+function determine_config_files {
+    local opts=""
+    case "$1" in
+        "neutron-server") opts="$(_determine_config_server)" ;;
+        "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
+    esac
+    if [ -z "$opts" ] ; then
+        die $LINENO "Could not determine config files for $1."
+    fi
+    echo "$opts"
+}
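+
+# For example, with the ML2 plugin and the default /etc/neutron layout
+# (illustrative), ``determine_config_files neutron-server`` echoes:
+#
+#    --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini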
+
+# configure_neutron()
+# Set common config for the neutron server and all agents.
+function configure_neutron {
+    _configure_neutron_common
     iniset_rpc_backend neutron $NEUTRON_CONF
 
-    # Neutron API server & Neutron plugin
-    if is_service_enabled neutron-api; then
-        local policy_file=$NEUTRON_CONF_DIR/policy.json
-        # Allow neutron user to administer neutron to match neutron account
-        # NOTE(amotoki): This is required for nova works correctly with neutron.
-        if [ -f $NEUTRON_DIR/etc/policy.json ]; then
-            cp $NEUTRON_DIR/etc/policy.json $policy_file
-            sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $policy_file
-        else
-            echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $policy_file
-        fi
-
-        cp $NEUTRON_DIR/etc/api-paste.ini $NEUTRON_CONF_DIR/api-paste.ini
-
-        iniset $NEUTRON_CONF DEFAULT core_plugin $NEUTRON_CORE_PLUGIN
-
-        iniset $NEUTRON_CONF DEFAULT policy_file $policy_file
-        iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips True
-        iniset $NEUTRON_CONF DEFAULT router_distributed $NEUTRON_DISTRIBUTED_ROUTING
-
-        iniset $NEUTRON_CONF DEFAULT auth_strategy $NEUTRON_AUTH_STRATEGY
-        configure_keystone_authtoken_middleware $NEUTRON_CONF neutron
-        configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
-
-        # Configure tenant network type
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 tenant_network_types $NEUTRON_TENANT_NETWORK_TYPE
-
-        local mech_drivers="openvswitch"
-        if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
-            mech_drivers+=",l2population"
-        else
-            mech_drivers+=",linuxbridge"
-        fi
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2 mechanism_drivers $mech_drivers
-
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vxlan vni_ranges 1001:2000
-        iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_flat flat_networks $PUBLIC_NETWORK_NAME
-        if [[ "$NEUTRON_TENANT_NETWORK_TYPE" =~ "vlan" ]] && [[ "$NEUTRON_PHYSICAL_NETWORK" != "" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF ml2_type_vlan network_vlan_ranges ${NEUTRON_PHYSICAL_NETWORK}:${NEUTRON_TENANT_VLAN_RANGE}
-        fi
-        if [[ "$NEUTRON_PORT_SECURITY" = "True" ]]; then
-            neutron_ml2_extension_driver_add port_security
-        fi
-        configure_rbac_policies
+    if is_service_enabled q-metering neutron-metering; then
+        _configure_neutron_metering
+    fi
+    if is_service_enabled q-agt neutron-agent; then
+        _configure_neutron_plugin_agent
+    fi
+    if is_service_enabled q-dhcp neutron-dhcp; then
+        _configure_neutron_dhcp_agent
+    fi
+    if is_service_enabled q-l3 neutron-l3; then
+        _configure_neutron_l3_agent
+    fi
+    if is_service_enabled q-meta neutron-metadata-agent; then
+        _configure_neutron_metadata_agent
     fi
 
-    # Neutron OVS or LB agent
-    if is_service_enabled neutron-agent; then
-        iniset $NEUTRON_CORE_PLUGIN_CONF agent tunnel_types vxlan
-        iniset $NEUTRON_CORE_PLUGIN_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        configure_root_helper_options $NEUTRON_CORE_PLUGIN_CONF
+    if [[ "$Q_DVR_MODE" != "legacy" ]]; then
+        _configure_dvr
+    fi
+    if is_service_enabled ceilometer; then
+        _configure_neutron_ceilometer_notifications
+    fi
 
-        # Configure the neutron agent
-        if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver iptables
-            iniset $NEUTRON_CORE_PLUGIN_CONF vxlan local_ip $HOST_IP
-        elif [[ $NEUTRON_AGENT == "openvswitch" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF securitygroup firewall_driver openvswitch
-            iniset $NEUTRON_CORE_PLUGIN_CONF ovs local_ip $HOST_IP
+    if [[ $Q_AGENT == "ovn" ]]; then
+        configure_ovn
+        configure_ovn_plugin
+    fi
 
-            if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
-                iniset $NEUTRON_CORE_PLUGIN_CONF agent l2_population True
-                iniset $NEUTRON_CORE_PLUGIN_CONF agent enable_distributed_routing True
-                iniset $NEUTRON_CORE_PLUGIN_CONF agent arp_responder True
-            fi
-        fi
-
-        if ! running_in_container; then
-            enable_kernel_bridge_firewall
+    # Configure Neutron's advanced services
+    if is_service_enabled q-placement neutron-placement; then
+        configure_placement_extension
+    fi
+    if is_service_enabled q-trunk neutron-trunk; then
+        configure_trunk_extension
+    fi
+    if is_service_enabled q-qos neutron-qos; then
+        configure_qos
+        if is_service_enabled q-l3 neutron-l3; then
+            configure_l3_agent_extension_fip_qos
+            configure_l3_agent_extension_gateway_ip_qos
         fi
     fi
-
-    # DHCP Agent
-    if is_service_enabled neutron-dhcp; then
-        cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $NEUTRON_DHCP_CONF
-
-        iniset $NEUTRON_DHCP_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        # make it so we have working DNS from guests
-        iniset $NEUTRON_DHCP_CONF DEFAULT dnsmasq_local_resolv True
-
-        configure_root_helper_options $NEUTRON_DHCP_CONF
-        iniset $NEUTRON_DHCP_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
+    if is_service_enabled neutron-segments; then
+        configure_placement_neutron
+        configure_segments_extension
     fi
 
-    if is_service_enabled neutron-l3; then
-        cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_L3_CONF
-        iniset $NEUTRON_L3_CONF DEFAULT interface_driver $NEUTRON_AGENT
-        neutron_service_plugin_class_add router
-        configure_root_helper_options $NEUTRON_L3_CONF
-        iniset $NEUTRON_L3_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        neutron_plugin_configure_l3_agent $NEUTRON_L3_CONF
-
-        # Configure the neutron agent to serve external network ports
-        if [[ $NEUTRON_AGENT == "linuxbridge" ]]; then
-            iniset $NEUTRON_CORE_PLUGIN_CONF linux_bridge bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
-        else
-            iniset $NEUTRON_CORE_PLUGIN_CONF ovs bridge_mappings "$PUBLIC_NETWORK_NAME:$PUBLIC_BRIDGE"
-        fi
-
-        if [[ "$NEUTRON_DISTRIBUTED_ROUTING" = "True" ]]; then
-            iniset $NEUTRON_L3_CONF DEFAULT agent_mode $NEUTRON_DVR_MODE
-        fi
+    # Finally configure Neutron server and core plugin
+    if is_service_enabled q-agt neutron-agent q-svc neutron-api; then
+        _configure_neutron_service
     fi
 
-    # Metadata
-    if is_service_enabled neutron-metadata-agent; then
-        cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $NEUTRON_META_CONF
+    iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
+    # devstack is not a tool for running uber-scale OpenStack
+    # clouds, so running without a dedicated RPC worker
+    # for state reports is more than adequate.
+    iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
 
-        iniset $NEUTRON_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $NEUTRON_META_CONF DEFAULT nova_metadata_host $NEUTRON_META_DATA_HOST
-        iniset $NEUTRON_META_CONF DEFAULT metadata_workers $API_WORKERS
-        # TODO(ihrachys) do we really need to set rootwrap for metadata agent?
-        configure_root_helper_options $NEUTRON_META_CONF
-
-        # TODO(dtroyer): remove the v2.0 hard code below
-        iniset $NEUTRON_META_CONF DEFAULT auth_url $KEYSTONE_SERVICE_URI
-        configure_keystone_authtoken_middleware $NEUTRON_META_CONF neutron DEFAULT
-    fi
-
-    # Format logging
-    setup_logging $NEUTRON_CONF
-
-    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
-        # Set the service port for a proxy to take the original
-        iniset $NEUTRON_CONF DEFAULT bind_port "$NEUTRON_SERVICE_PORT_INT"
-        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
-    fi
-
-    # Metering
-    if is_service_enabled neutron-metering; then
-        cp $NEUTRON_DIR/etc/metering_agent.ini.sample $NEUTRON_METERING_AGENT_CONF
-        neutron_service_plugin_class_add metering
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_UWSGI" "/networking" "" "neutron-api"
     fi
 }
 
-# configure_neutron_rootwrap() - configure Neutron's rootwrap
-function configure_neutron_rootwrap {
-    # Deploy new rootwrap filters files (owned by root).
-    # Wipe any existing rootwrap.d files first
-    if [[ -d $NEUTRON_CONF_DIR/rootwrap.d ]]; then
-        sudo rm -rf $NEUTRON_CONF_DIR/rootwrap.d
+function configure_neutron_nova {
+    create_nova_conf_neutron $NOVA_CONF
+    if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
+        for i in $(seq 1 $NOVA_NUM_CELLS); do
+            local conf
+            conf=$(conductor_conf $i)
+            create_nova_conf_neutron $conf
+        done
     fi
-
-    # Deploy filters to /etc/neutron/rootwrap.d
-    sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
-    sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
-
-    # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
-    sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $NEUTRON_CONF_DIR
-    sudo sed -e "s:^filters_path=.*$:filters_path=$NEUTRON_CONF_DIR/rootwrap.d:" -i $NEUTRON_CONF_DIR/rootwrap.conf
-
-    # Set up the rootwrap sudoers for Neutron
-    tempfile=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_CMD *" >$tempfile
-    echo "$STACK_USER ALL=(root) NOPASSWD: $NEUTRON_ROOTWRAP_DAEMON_CMD" >>$tempfile
-    chmod 0440 $tempfile
-    sudo chown root:root $tempfile
-    sudo mv $tempfile /etc/sudoers.d/neutron-rootwrap
 }
 
-# Make Neutron-required changes to nova.conf
-# Takes a single optional argument which is the config file to update,
-# if not passed $NOVA_CONF is used.
-function configure_neutron_nova_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
+function create_nova_conf_neutron {
     local conf=${1:-$NOVA_CONF}
     iniset $conf neutron auth_type "password"
     iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf neutron username neutron
+    iniset $conf neutron username nova
     iniset $conf neutron password "$SERVICE_PASSWORD"
-    iniset $conf neutron user_domain_name "Default"
-    iniset $conf neutron project_name "$SERVICE_TENANT_NAME"
-    iniset $conf neutron project_domain_name "Default"
-    iniset $conf neutron auth_strategy $NEUTRON_AUTH_STRATEGY
+    iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
+    iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
+    iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
     iniset $conf neutron region_name "$REGION_NAME"
 
     # optionally set options in nova_conf
     neutron_plugin_create_nova_conf $conf
 
-    if is_service_enabled neutron-metadata-agent; then
+    if is_service_enabled q-meta neutron-metadata-agent; then
         iniset $conf neutron service_metadata_proxy "True"
     fi
 
+    iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
+    iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
 }
 
+# create_neutron_accounts() - Set up common required neutron accounts
+
 # Tenant               User       Roles
 # ------------------------------------------------------------------
 # service              neutron    admin        # if enabled
 
-# create_neutron_accounts() - Create required service accounts
-function create_neutron_accounts_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
+# Migrated from keystone_data.sh
+function create_neutron_accounts {
     local neutron_url
-
     if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST/networking/
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST/
     else
-        neutron_url=$NEUTRON_SERVICE_PROTOCOL://$NEUTRON_SERVICE_HOST:$NEUTRON_SERVICE_PORT/
+        neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
+    fi
+    if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+        neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
     fi
 
-
-    if [[ "$ENABLED_SERVICES" =~ "neutron-api" ]]; then
+    if is_service_enabled q-svc neutron-api; then
 
         create_service_user "neutron"
 
-        neutron_service=$(get_or_create_service "neutron" \
-            "network" "Neutron Service")
-        get_or_create_endpoint $neutron_service \
+        get_or_create_service "neutron" "network" "Neutron Service"
+        get_or_create_endpoint \
+            "network" \
             "$REGION_NAME" "$neutron_url"
     fi
 }
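+
+# As an illustration (assuming default values), the endpoint URL created
+# above is e.g. ``http://$SERVICE_HOST/networking`` when the API runs
+# under uwsgi, or ``http://$SERVICE_HOST:9696/`` (the default Q_PORT)
+# when the standalone neutron-server process is used.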
 
 # init_neutron() - Initialize databases, etc.
-function init_neutron_new {
-
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    recreate_database neutron
-
+function init_neutron {
+    recreate_database $Q_DB_NAME
     time_start "dbsync"
     # Run Neutron db migrations
-    $NEUTRON_BIN_DIR/neutron-db-manage upgrade heads
+    $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
     time_stop "dbsync"
 }
 
 # install_neutron() - Collect source and prepare
-function install_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
-    setup_develop $NEUTRON_DIR
-
+function install_neutron {
     # Install neutron-lib from git so we make sure we're testing
     # the latest code.
     if use_library_from_git "neutron-lib"; then
@@ -434,17 +552,23 @@
         setup_dev_lib "neutron-lib"
     fi
 
-    # L3 service requires radvd
-    if is_service_enabled neutron-l3; then
-        install_package radvd
+    # Install SQLAlchemy and alembic from git when these are required
+    # see https://bugs.launchpad.net/neutron/+bug/2042941
+    if use_library_from_git "sqlalchemy"; then
+        git_clone $SQLALCHEMY_REPO $SQLALCHEMY_DIR $SQLALCHEMY_BRANCH
+        setup_develop $SQLALCHEMY_DIR
+    fi
+    if use_library_from_git "alembic"; then
+        git_clone $ALEMBIC_REPO $ALEMBIC_DIR $ALEMBIC_BRANCH
+        setup_develop $ALEMBIC_DIR
     fi
 
-    if is_service_enabled neutron-agent neutron-dhcp neutron-l3; then
-        #TODO(sc68cal) - kind of ugly
-        source $TOP_DIR/lib/neutron_plugins/${NEUTRON_AGENT}_agent
-        neutron_plugin_install_agent_packages
-    fi
+    git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
+    setup_develop $NEUTRON_DIR
 
+    if [[ $Q_AGENT == "ovn" ]]; then
+        install_ovn
+    fi
 }
 
 # install_neutronclient() - Collect source and prepare
@@ -452,187 +576,33 @@
     if use_library_from_git "python-neutronclient"; then
         git_clone_by_name "python-neutronclient"
         setup_dev_lib "python-neutronclient"
-        sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion
     fi
 }
 
-# start_neutron_api() - Start the API process ahead of other things
-function start_neutron_api {
-    local service_port=$NEUTRON_SERVICE_PORT
-    local service_protocol=$NEUTRON_SERVICE_PROTOCOL
-    local neutron_url
-    if is_service_enabled tls-proxy; then
-        service_port=$NEUTRON_SERVICE_PORT_INT
-        service_protocol="http"
+# install_neutron_agent_packages() - Install packages required by the enabled agents
+function install_neutron_agent_packages {
+    # radvd doesn't come with the OS. Install it if the l3 service is enabled.
+    if is_service_enabled q-l3 neutron-l3; then
+        install_package radvd
     fi
-
-    local opts=""
-    opts+=" --config-file $NEUTRON_CONF"
-    opts+=" --config-file $NEUTRON_CORE_PLUGIN_CONF"
-    local cfg_file
-    for cfg_file in ${_NEUTRON_SERVER_EXTRA_CONF_FILES_ABS[@]}; do
-        opts+=" --config-file $cfg_file"
-    done
-
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
-        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST/networking/
-        enable_service neutron-rpc-server
-        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $opts"
-    else
-        # Start the Neutron service
-        # TODO(sc68cal) Stop hard coding this
-        run_process neutron-api "$NEUTRON_BIN_DIR/neutron-server $opts"
-        neutron_url=$service_protocol://$NEUTRON_SERVICE_HOST:$service_port
-        # Start proxy if enabled
-        if is_service_enabled tls-proxy; then
-            start_tls_proxy neutron '*' $NEUTRON_SERVICE_PORT $NEUTRON_SERVICE_HOST $NEUTRON_SERVICE_PORT_INT
-        fi
-    fi
-
-    if ! wait_for_service $SERVICE_TIMEOUT $neutron_url; then
-        die $LINENO "neutron-api did not start"
+    # install packages that are specific to plugin agent(s)
+    if is_service_enabled q-agt neutron-agent q-dhcp neutron-dhcp q-l3 neutron-l3; then
+        neutron_plugin_install_agent_packages
     fi
 }
 
-# start_neutron() - Start running processes
-function start_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    # Start up the neutron agents if enabled
-    # TODO(sc68cal) Make this pluggable so different DevStack plugins for different Neutron plugins
-    # can resolve the $NEUTRON_AGENT_BINARY
-    if is_service_enabled neutron-agent; then
-        # TODO(ihrachys) stop loading ml2_conf.ini into agents, instead load agent specific files
-        run_process neutron-agent "$NEUTRON_BIN_DIR/$NEUTRON_AGENT_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_CORE_PLUGIN_CONF"
+# Finish neutron configuration
+function configure_neutron_after_post_config {
+    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
+        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
     fi
-    if is_service_enabled neutron-dhcp; then
-        neutron_plugin_configure_dhcp_agent $NEUTRON_DHCP_CONF
-        run_process neutron-dhcp "$NEUTRON_BIN_DIR/$NEUTRON_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_DHCP_CONF"
-    fi
-    if is_service_enabled neutron-l3; then
-        run_process neutron-l3 "$NEUTRON_BIN_DIR/$NEUTRON_L3_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_L3_CONF"
-    fi
-    if is_service_enabled neutron-api && [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" == "True" ]]; then
-        # XXX(sc68cal) - Here's where plugins can wire up their own networks instead
-        # of the code in lib/neutron_plugins/services/l3
-        if type -p neutron_plugin_create_initial_networks > /dev/null; then
-            neutron_plugin_create_initial_networks
-        else
-            # XXX(sc68cal) Load up the built in Neutron networking code and build a topology
-            source $TOP_DIR/lib/neutron_plugins/services/l3
-            # Create the networks using servic
-            create_neutron_initial_network
-        fi
-    fi
-    if is_service_enabled neutron-metadata-agent; then
-        run_process neutron-metadata-agent "$NEUTRON_BIN_DIR/$NEUTRON_META_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_META_CONF"
-    fi
-
-    if is_service_enabled neutron-metering; then
-        run_process neutron-metering "$NEUTRON_BIN_DIR/$NEUTRON_METERING_BINARY --config-file $NEUTRON_CONF --config-file $NEUTRON_METERING_AGENT_CONF"
-    fi
-}
-
-# stop_neutron() - Stop running processes
-function stop_neutron_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    for serv in neutron-api neutron-agent neutron-l3; do
-        stop_process $serv
-    done
-
-    if is_service_enabled neutron-rpc-server; then
-        stop_process neutron-rpc-server
-    fi
-
-    if is_service_enabled neutron-dhcp; then
-        stop_process neutron-dhcp
-        pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
-        [ ! -z "$pid" ] && sudo kill -9 $pid
-    fi
-
-    if is_service_enabled neutron-metadata-agent; then
-        sudo pkill -9 -f neutron-ns-metadata-proxy || :
-        stop_process neutron-metadata-agent
-    fi
-}
-
-# neutron_service_plugin_class_add() - add service plugin class
-function neutron_service_plugin_class_add_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    local service_plugin_class=$1
-    local plugins=""
-
-    plugins=$(iniget $NEUTRON_CONF DEFAULT service_plugins)
-    if [ $plugins ]; then
-        plugins+=","
-    fi
-    plugins+="${service_plugin_class}"
-    iniset $NEUTRON_CONF DEFAULT service_plugins $plugins
-}
-
-function _neutron_ml2_extension_driver_add {
-    local driver=$1
-    local drivers=""
-
-    drivers=$(iniget $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers)
-    if [ $drivers ]; then
-        drivers+=","
-    fi
-    drivers+="${driver}"
-    iniset $NEUTRON_CORE_PLUGIN_CONF ml2 extension_drivers $drivers
-}
-
-function neutron_server_config_add_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    _NEUTRON_SERVER_EXTRA_CONF_FILES_ABS+=($1)
-}
-
-# neutron_deploy_rootwrap_filters() - deploy rootwrap filters
-function neutron_deploy_rootwrap_filters_new {
-    deprecated "Using lib/neutron is deprecated, and it will be removed in AA release!"
-    local srcdir=$1
-    sudo install -d -o root -g root -m 755 $NEUTRON_CONF_DIR/rootwrap.d
-    sudo install -o root -g root -m 644 $srcdir/etc/neutron/rootwrap.d/*.filters $NEUTRON_CONF_DIR/rootwrap.d
-}
-
-# Dispatch functions
-# These are needed for compatibility between the old and new implementations
-# where there are function name overlaps.  These will be removed when
-# neutron-legacy is removed.
-# TODO(sc68cal) Remove when neutron-legacy is no more.
-function cleanup_neutron {
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        stop_process neutron-api
-        stop_process neutron-rpc-server
-        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api"
-        sudo rm -f $(apache_site_config_for neutron-api)
-    fi
-
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        cleanup_mutnauq "$@"
-    else
-        cleanup_neutron_new "$@"
-    fi
-}
-
-function configure_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        configure_mutnauq "$@"
-    else
-        configure_neutron_new "$@"
-    fi
-
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        write_uwsgi_config "$NEUTRON_UWSGI_CONF" "$NEUTRON_BIN_DIR/neutron-api" "/networking"
-    fi
+    configure_rbac_policies
 }
 
 # configure_rbac_policies() - Configure Neutron to enforce new RBAC
 # policies and scopes if NEUTRON_ENFORCE_SCOPE == True
 function configure_rbac_policies {
-    if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
+    if [[ "$NEUTRON_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == True ]]; then
         iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
         iniset $NEUTRON_CONF oslo_policy enforce_scope True
     else
@@ -641,120 +611,606 @@
     fi
 }
 
-
-function configure_neutron_nova {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        create_nova_conf_neutron $NOVA_CONF
-        if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
-            for i in $(seq 1 $NOVA_NUM_CELLS); do
-                local conf
-                conf=$(conductor_conf $i)
-                create_nova_conf_neutron $conf
-            done
+# Start running OVN processes
+function start_ovn_services {
+    if [[ $Q_AGENT == "ovn" ]]; then
+        if [ "$VIRT_DRIVER" != 'ironic' ]; then
+            # NOTE(TheJulia): Ironic's devstack plugin needs to perform
+            # additional networking configuration to set up a working test
+            # environment with test virtual machines to emulate baremetal,
+            # which requires OVN to be up and running earlier to complete
+            # that base configuration.
+            init_ovn
+            start_ovn
         fi
-    else
-        configure_neutron_nova_new $NOVA_CONF
-        if [[ "${CELLSV2_SETUP}" == "superconductor" ]]; then
-            for i in $(seq 1 $NOVA_NUM_CELLS); do
-                local conf
-                conf=$(conductor_conf $i)
-                configure_neutron_nova_new $conf
-            done
+        if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
+            if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
+                echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
+                echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
+            else
+                create_public_bridge
+            fi
         fi
     fi
 }
 
-function create_neutron_accounts {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        create_mutnauq_accounts "$@"
-    else
-        create_neutron_accounts_new "$@"
-    fi
-}
+# Start running processes
+function start_neutron_service_and_check {
+    local service_port=$Q_PORT
+    local service_protocol=$Q_PROTOCOL
+    local cfg_file_options
+    local neutron_url
 
-function init_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        init_mutnauq "$@"
-    else
-        init_neutron_new "$@"
-    fi
-}
+    cfg_file_options="$(determine_config_files neutron-server)"
 
-function install_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        install_mutnauq "$@"
-    else
-        install_neutron_new "$@"
+    if is_service_enabled tls-proxy; then
+        service_port=$Q_PORT_INT
+        service_protocol="http"
     fi
-}
 
-function neutron_service_plugin_class_add {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        _neutron_service_plugin_class_add "$@"
-    else
-        neutron_service_plugin_class_add_new "$@"
-    fi
-}
+    # Start the Neutron service
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        # The default value of "rpc_workers" is None (not defined). If
+        # "rpc_workers" is explicitly set to 0, the RPC worker processes
+        # should not be started.
+        local rpc_workers
+        rpc_workers=$(iniget_multiline $NEUTRON_CONF DEFAULT rpc_workers)
 
-function neutron_ml2_extension_driver_add {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        _neutron_ml2_extension_driver_add_old "$@"
+        enable_service neutron-api
+        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
+        neutron_url=$Q_PROTOCOL://$Q_HOST/
+        if [ "$rpc_workers" != "0" ]; then
+            enable_service neutron-rpc-server
+        fi
+        enable_service neutron-periodic-workers
+        _enable_ovn_maintenance
+        if [ "$rpc_workers" != "0" ]; then
+            run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
+        fi
+        run_process neutron-periodic-workers "$NEUTRON_BIN_DIR/neutron-periodic-workers $cfg_file_options"
+        _run_ovn_maintenance
     else
-        _neutron_ml2_extension_driver_add "$@"
+        run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
+        neutron_url=$service_protocol://$Q_HOST:$service_port/
+        # Start proxy if enabled
+        if is_service_enabled tls-proxy; then
+            start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
+        fi
     fi
-}
+    if [ ! -z "$NEUTRON_ENDPOINT_SERVICE_NAME" ]; then
+        neutron_url=$neutron_url$NEUTRON_ENDPOINT_SERVICE_NAME
+    fi
+    echo "Waiting for Neutron to start..."
 
-function install_neutron_agent_packages {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        install_neutron_agent_packages_mutnauq "$@"
-    else
-        :
-    fi
-}
-
-function neutron_server_config_add {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        mutnauq_server_config_add "$@"
-    else
-        neutron_server_config_add_new "$@"
-    fi
+    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
+    test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
 }
 
 function start_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        start_mutnauq_l2_agent "$@"
-        start_mutnauq_other_agents "$@"
-    else
-        start_neutron_new "$@"
+    start_l2_agent "$@"
+    start_other_agents "$@"
+}
+
+# Control of the l2 agent is separated out to make it easier to test partial
+# upgrades (everything upgraded except the L2 agent)
+function start_l2_agent {
+    run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
+
+    if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
+        sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
+        sudo ip link set $OVS_PHYSICAL_BRIDGE up
+        sudo ip link set br-int up
+        sudo ip link set $PUBLIC_INTERFACE up
+        if is_ironic_hardware; then
+            for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
+                sudo ip addr del $IP dev $PUBLIC_INTERFACE
+                sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
+            done
+            sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+        fi
     fi
 }
 
+function start_other_agents {
+    run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
+
+    run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
+
+    run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
+    run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
+}
+
+# Start running processes, including screen
+function start_neutron_agents {
+    # NOTE(slaweq): it's now just a wrapper for start_neutron function
+    start_neutron "$@"
+}
+
+function stop_l2_agent {
+    stop_process q-agt
+}
+
+# stop_other() - Stop running processes
+function stop_other {
+    if is_service_enabled q-dhcp neutron-dhcp; then
+        stop_process q-dhcp
+        pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
+        [ ! -z "$pid" ] && sudo kill -9 $pid
+    fi
+
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-rpc-server
+        stop_process neutron-periodic-workers
+        stop_process neutron-api
+        _stop_ovn_maintenance
+    else
+        stop_process q-svc
+    fi
+
+    if is_service_enabled q-l3 neutron-l3; then
+        sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
+        stop_process q-l3
+    fi
+
+    if is_service_enabled q-meta neutron-metadata-agent; then
+        stop_process q-meta
+    fi
+
+    if is_service_enabled q-metering neutron-metering; then
+        neutron_metering_stop
+    fi
+
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "$NEUTRON_ROOTWRAP-[d]aemon" || :
+    fi
+}
+
+# stop_neutron() - Stop running processes (non-screen)
 function stop_neutron {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        stop_mutnauq "$@"
-    else
-        stop_neutron_new "$@"
+    stop_other
+    stop_l2_agent
+
+    if [[ $Q_AGENT == "ovn" && $SKIP_STOP_OVN != "True" ]]; then
+        stop_ovn
     fi
 }
 
-function neutron_deploy_rootwrap_filters {
-    if is_neutron_legacy_enabled; then
-        # Call back to old function
-        _neutron_deploy_rootwrap_filters "$@"
+# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
+# on startup, or back to the public interface on cleanup. If no IP is
+# configured on the interface, just add it as a port to the OVS bridge.
+function _move_neutron_addresses_route {
+    local from_intf=$1
+    local to_intf=$2
+    local add_ovs_port=$3
+    local del_ovs_port=$4
+    local af=$5
+
+    if [[ -n "$from_intf" && -n "$to_intf" ]]; then
+        # Remove the primary IP address from $from_intf and add it to $to_intf,
+        # along with the default route, if it exists.  When called during
+        # configuration, $from_intf is also added as a port on $to_intf,
+        # assuming it is an OVS bridge.
+
+        local IP_REPLACE=""
+        local IP_DEL=""
+        local IP_UP=""
+        local DEFAULT_ROUTE_GW
+        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
+        local ADD_OVS_PORT=""
+        local DEL_OVS_PORT=""
+        local ARP_CMD=""
+
+        IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
+
+        if [ "$DEFAULT_ROUTE_GW" != "" ]; then
+            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
+        fi
+
+        if [[ "$add_ovs_port" == "True" ]]; then
+            ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
+        fi
+
+        if [[ "$del_ovs_port" == "True" ]]; then
+            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
+        fi
+
+        if [[ "$IP_BRD" != "" ]]; then
+            IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
+            IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
+            IP_UP="sudo ip link set $to_intf up"
+            if [[ "$af" == "inet" ]]; then
+                IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
+                ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
+            fi
+        fi
+
+        # The add/del OVS port calls have to happen either before or
+        # after the address is moved in order to not leave it orphaned.
+        $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
+    fi
+}
+
+# _configure_public_network_connectivity() - Configures connectivity to the
+# external network using $PUBLIC_INTERFACE or NAT on the single interface
+# machines
+function _configure_public_network_connectivity {
+    # If we've been given a PUBLIC_INTERFACE to take over, then we assume
+    # that we can own the whole thing, and pivot it into the OVS
+    # bridge. If not, we're probably on a single interface
+    # machine, and we just set up NAT so that fixed guests can get out.
+    if [[ -n "$PUBLIC_INTERFACE" ]]; then
+        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
+
+        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
+            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
+        fi
     else
-        neutron_deploy_rootwrap_filters_new "$@"
+        for d in $default_v4_route_devs; do
+            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
+        done
+    fi
+}
+
+# cleanup_neutron() - Remove residual data files, anything left over from previous
+# runs that a clean run would need to clean up
+function cleanup_neutron {
+    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
+        stop_process neutron-api
+        stop_process neutron-rpc-server
+        stop_process neutron-periodic-workers
+        _stop_ovn_maintenance
+        remove_uwsgi_config "$NEUTRON_UWSGI_CONF" "neutron-api"
+        sudo rm -f $(apache_site_config_for neutron-api)
+    fi
+
+    if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
+        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
+
+        if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
+            # ip(8) wants the prefix length when deleting
+            local v6_gateway
+            v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
+            sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
+            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
+        fi
+
+        if is_provider_network && is_ironic_hardware; then
+            for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
+                sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
+                sudo ip addr add $IP dev $PUBLIC_INTERFACE
+            done
+            sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
+        fi
+    fi
+
+    if is_neutron_ovs_base_plugin; then
+        neutron_ovs_base_cleanup
+    fi
+
+    if [[ $Q_AGENT == "linuxbridge" ]]; then
+        neutron_lb_cleanup
+    fi
+
+    # delete all namespaces created by neutron
+    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
+        sudo ip netns delete ${ns}
+    done
+
+    if [[ $Q_AGENT == "ovn" ]]; then
+        cleanup_ovn
+    fi
+}
+
+
+function _create_neutron_conf_dir {
+    # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
+    sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
+}
+
+# _configure_neutron_common()
+# Set common config for the neutron server and all agents.
+# This MUST be called before other ``_configure_neutron_*`` functions.
+function _configure_neutron_common {
+    _create_neutron_conf_dir
+
+    # Uses oslo config generator to generate core sample configuration files
+    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
+
+    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
+
+    Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
+
+    # Allow the neutron user to administer neutron to match the neutron account
+    # NOTE(amotoki): This is required for nova to work correctly with neutron.
+    if [ -f $NEUTRON_DIR/etc/policy.json ]; then
+        cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
+        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
+    else
+        echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
+    fi
+
+    # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
+    # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
+    neutron_plugin_configure_common
+
+    if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
+        die $LINENO "Neutron plugin not set.. exiting"
+    fi
+
+    # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
+    mkdir -p /$Q_PLUGIN_CONF_PATH
+    Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
+    # NOTE(slaweq): NEUTRON_CORE_PLUGIN_CONF is used e.g. in the neutron repository;
+    # it was previously defined in the lib/neutron module, which is now deleted.
+    NEUTRON_CORE_PLUGIN_CONF=$Q_PLUGIN_CONF_FILE
+    # NOTE(hichihara): Some neutron vendor plugins have already been decomposed and
+    # have no config file in the Neutron tree. Each such plugin should prepare its own file.
+    if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
+        cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
+    elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
+        cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
+    fi
+
+    iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
+    iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
+    iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
+    iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
+    iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
+
+    # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation
+    iniset $NEUTRON_CONF nova region_name $REGION_NAME
+
+    if [ "$VIRT_DRIVER" = 'fake' ]; then
+        # Disable arbitrary limits
+        iniset $NEUTRON_CONF quotas quota_network -1
+        iniset $NEUTRON_CONF quotas quota_subnet -1
+        iniset $NEUTRON_CONF quotas quota_port -1
+        iniset $NEUTRON_CONF quotas quota_security_group -1
+        iniset $NEUTRON_CONF quotas quota_security_group_rule -1
+    fi
+
+    # Format logging
+    setup_logging $NEUTRON_CONF
+
+    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
+        # Set the service port for a proxy to take the original
+        iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
+        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
+    fi
+
+    _neutron_setup_rootwrap
+}
+
+function _configure_neutron_dhcp_agent {
+
+    cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
+
+    iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    # make it so we have working DNS from guests
+    iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
+    configure_root_helper_options $Q_DHCP_CONF_FILE
+
+    if ! is_service_enabled q-l3 neutron-l3; then
+        if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
+            iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA
+            iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK
+        else
+            if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then
+                die "$LINENO" "Enable isolated metadata is a must for metadata network"
+            fi
+        fi
+    fi
+
+    _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
+
+    neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
+}
+
+
+function _configure_neutron_metadata_agent {
+    cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
+
+    iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
+    iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
+    configure_root_helper_options $Q_META_CONF_FILE
+}
+
+function _configure_neutron_ceilometer_notifications {
+    iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2
+}
+
+function _configure_neutron_metering {
+    neutron_agent_metering_configure_common
+    neutron_agent_metering_configure_agent
+}
+
+function _configure_dvr {
+    iniset $NEUTRON_CONF DEFAULT router_distributed True
+    iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
+}
+
+
+# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
+# It is called when q-agt is enabled.
+function _configure_neutron_plugin_agent {
+    # Specify the default root helper prior to agent configuration to
+    # ensure that an agent's configuration can override the default
+    configure_root_helper_options /$Q_PLUGIN_CONF_FILE
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+
+    # Configure agent for plugin
+    neutron_plugin_configure_plugin_agent
+}
+
+function _replace_api_paste_composite {
+    local sep
+    sep=$(echo -ne "\x01")
+    # Replace it
+    $sudo sed -i -e "s/\/\: neutronversions_composite/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/\: neutronversions_composite/" "$Q_API_PASTE_FILE"
+    $sudo sed -i -e "s/\/healthcheck\: healthcheck/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/healthcheck\: healthcheck/" "$Q_API_PASTE_FILE"
+    $sudo sed -i -e "s/\/v2.0\: neutronapi_v2_0/\/"${NEUTRON_ENDPOINT_SERVICE_NAME}"\/v2.0\: neutronapi_v2_0/" "$Q_API_PASTE_FILE"
+}
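+
+# Illustrative effect, assuming NEUTRON_ENDPOINT_SERVICE_NAME=networking:
+# a composite entry such as ``/v2.0: neutronapi_v2_0`` in api-paste.ini is
+# rewritten to ``/networking/v2.0: neutronapi_v2_0`` so the standalone
+# server answers under the same URL prefix as the WSGI deployment.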
+
+# _configure_neutron_service() - Set config files for neutron service
+# It is called when q-svc is enabled.
+function _configure_neutron_service {
+    Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
+    cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
+
+    if [[ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" && -n "$NEUTRON_ENDPOINT_SERVICE_NAME" ]]; then
+        _replace_api_paste_composite
+    fi
+
+    # Update either configuration file with plugin
+    iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
+
+    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+    iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
+
+    iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
+    configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
+
+    # Configuration for neutron notifications to nova.
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
+    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
+
+    configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
+
+    # Configuration for placement client
+    configure_keystone_authtoken_middleware $NEUTRON_CONF placement placement
+
+    # Configure plugin
+    neutron_plugin_configure_service
+}
+
+# Utility Functions
+#------------------
+
+# neutron_service_plugin_class_add() - add service plugin class
+function neutron_service_plugin_class_add {
+    local service_plugin_class=$1
+    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
+        Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
+    elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then
+        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class"
+    fi
+}
+
+# neutron_ml2_extension_driver_add() - add ML2 extension driver
+function neutron_ml2_extension_driver_add {
+    local extension=$1
+    if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then
+        Q_ML2_PLUGIN_EXT_DRIVERS=$extension
+    elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then
+        Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension"
+    fi
+}
+
+# neutron_server_config_add() - add server config file
+function neutron_server_config_add {
+    _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1)
+}
+
+# neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
+function neutron_deploy_rootwrap_filters {
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        return
+    fi
+    local srcdir=$1
+    sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
+    sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
+}
+
+# _neutron_setup_rootwrap() - configure Neutron's rootwrap
+function _neutron_setup_rootwrap {
+    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
+        return
+    fi
+    # Wipe any existing ``rootwrap.d`` files first
+    Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
+    if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
+        sudo rm -rf $Q_CONF_ROOTWRAP_D
+    fi
+
+    neutron_deploy_rootwrap_filters $NEUTRON_DIR
+
+    # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
+    # location moved in newer versions, prefer new location
+    if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
+        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE
+    else
+        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
+    fi
+    sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
+    # Rely on $PATH set by devstack to determine what is safe to execute
+    # by rootwrap rather than use explicit whitelist of paths in
+    # rootwrap.conf
+    sudo sed -e 's/^exec_dirs=.*/#&/' -i $Q_RR_CONF_FILE
+
+    # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
+    ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
+    ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
+
+    # Set up the rootwrap sudoers for neutron
+    TEMPFILE=`mktemp`
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
+    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
+    chmod 0440 $TEMPFILE
+    sudo chown root:root $TEMPFILE
+    sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
+
+    # Update the root_helper
+    configure_root_helper_options $NEUTRON_CONF
+}
+
+function configure_root_helper_options {
+    local conffile=$1
+    iniset $conffile agent root_helper "$Q_RR_COMMAND"
+    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
+        iniset $conffile agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
+    fi
+}
+
+function _neutron_setup_interface_driver {
+
+    # ovs_use_veth needs to be set before the plugin configuration
+    # occurs to allow plugins to override the setting.
+    iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
+
+    neutron_plugin_setup_interface_driver $1
+}
+# Functions for Neutron Exercises
+#--------------------------------
+
+# ssh check
+function _ssh_check_neutron {
+    local from_net=$1
+    local key_file=$2
+    local ip=$3
+    local user=$4
+    local timeout_sec=$5
+    local probe_cmd=""
+    probe_cmd=`_get_probe_cmd_prefix $from_net`
+    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
+    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
+}
+
+function plugin_agent_add_l2_agent_extension {
+    local l2_agent_extension=$1
+    if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
+        L2_AGENT_EXTENSIONS=$l2_agent_extension
+    elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
+        L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
     fi
 }
 
 # Restore xtrace
-$XTRACE
+$_XTRACE_NEUTRON
+
+# Tell emacs to use shell-script-mode
+## Local variables:
+## mode: shell-script
+## End:
diff --git a/lib/neutron-legacy b/lib/neutron-legacy
index b906a1b..e90400f 100644
--- a/lib/neutron-legacy
+++ b/lib/neutron-legacy
@@ -1,1075 +1,6 @@
 #!/bin/bash
-#
-# lib/neutron
-# functions - functions specific to neutron
 
-# Dependencies:
-# ``functions`` file
-# ``DEST`` must be defined
-# ``STACK_USER`` must be defined
+# TODO(slaweq): remove this file once other projects, e.g. Grenade, are
+# using lib/neutron
 
-# ``stack.sh`` calls the entry points in this order:
-#
-# - install_neutron_agent_packages
-# - install_neutronclient
-# - install_neutron
-# - install_neutron_third_party
-# - configure_neutron
-# - init_neutron
-# - configure_neutron_third_party
-# - init_neutron_third_party
-# - start_neutron_third_party
-# - create_nova_conf_neutron
-# - configure_neutron_after_post_config
-# - start_neutron_service_and_check
-# - check_neutron_third_party_integration
-# - start_neutron_agents
-# - create_neutron_initial_network
-#
-# ``unstack.sh`` calls the entry points in this order:
-#
-# - stop_neutron
-# - stop_neutron_third_party
-# - cleanup_neutron
-
-# Functions in lib/neutron are classified into the following categories:
-#
-# - entry points (called from stack.sh or unstack.sh)
-# - internal functions
-# - neutron exercises
-# - 3rd party programs
-
-
-# Neutron Networking
-# ------------------
-
-# Make sure that neutron is enabled in ``ENABLED_SERVICES``.  If you want
-# to run Neutron on this host, make sure that q-svc is also in
-# ``ENABLED_SERVICES``.
-#
-# See "Neutron Network Configuration" below for additional variables
-# that must be set in localrc for connectivity across hosts with
-# Neutron.
-
-# Settings
-# --------
-
-
-# Neutron Network Configuration
-# -----------------------------
-
-if is_service_enabled tls-proxy; then
-    Q_PROTOCOL="https"
-fi
-
-
-# Set up default directories
-GITDIR["python-neutronclient"]=$DEST/python-neutronclient
-
-
-NEUTRON_DIR=$DEST/neutron
-NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas
-
-# Support entry points installation of console scripts
-if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then
-    NEUTRON_BIN_DIR=$NEUTRON_DIR/bin
-else
-    NEUTRON_BIN_DIR=$(get_python_exec_prefix)
-fi
-
-NEUTRON_CONF_DIR=/etc/neutron
-NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf
-export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"}
-
-# NEUTRON_DEPLOY_MOD_WSGI defines how neutron is deployed, allowed values:
-# - False (default) : Run neutron under Eventlet
-# - True : Run neutron under uwsgi
-# TODO(annp): Switching to uwsgi in next cycle if things turn out to be stable
-# enough
-NEUTRON_DEPLOY_MOD_WSGI=$(trueorfalse False NEUTRON_DEPLOY_MOD_WSGI)
-
-NEUTRON_UWSGI_CONF=$NEUTRON_CONF_DIR/neutron-api-uwsgi.ini
-
-# If NEUTRON_ENFORCE_SCOPE == True, it will set "enforce_scope"
-# and "enforce_new_defaults" to True in the Neutron's config to enforce usage
-# of the new RBAC policies and scopes.
-NEUTRON_ENFORCE_SCOPE=$(trueorfalse False NEUTRON_ENFORCE_SCOPE)
-
-# Agent binaries.  Note, binary paths for other agents are set in per-service
-# scripts in lib/neutron_plugins/services/
-AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent"
-AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"}
-AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent"
-
-# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and
-# loaded from per-plugin  scripts in lib/neutron_plugins/
-Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini
-Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini
-Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini
-
-# Default name for Neutron database
-Q_DB_NAME=${Q_DB_NAME:-neutron}
-# Default Neutron Plugin
-Q_PLUGIN=${Q_PLUGIN:-ml2}
-# Default Neutron Port
-Q_PORT=${Q_PORT:-9696}
-# Default Neutron Internal Port when using TLS proxy
-Q_PORT_INT=${Q_PORT_INT:-19696}
-# Default Neutron Host
-Q_HOST=${Q_HOST:-$SERVICE_HOST}
-# Default protocol
-Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL}
-# Default listen address
-Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$(ipv6_unquote $SERVICE_LISTEN_ADDRESS)}
-# Default admin username
-Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron}
-# Default auth strategy
-Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone}
-# RHEL's support for namespaces requires using veths with ovs
-Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False}
-Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True}
-Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON)
-# Meta data IP
-Q_META_DATA_IP=${Q_META_DATA_IP:-$(ipv6_unquote $SERVICE_HOST)}
-# Allow Overlapping IP among subnets
-Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True}
-Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True}
-Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True}
-VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True}
-VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300}
-
-# The directory which contains files for Q_PLUGIN_EXTRA_CONF_FILES.
-# /etc/neutron is assumed by many of devstack plugins.  Do not change.
-_Q_PLUGIN_EXTRA_CONF_PATH=/etc/neutron
-
-# List of config file names in addition to the main plugin config file
-# To add additional plugin config files, use ``neutron_server_config_add``
-# utility function.  For example:
-#
-#    ``neutron_server_config_add file1``
-#
-# These config files are relative to ``/etc/neutron``.  The above
-# example would specify ``--config-file /etc/neutron/file1`` for
-# neutron server.
-declare -a -g Q_PLUGIN_EXTRA_CONF_FILES
-
-# same as Q_PLUGIN_EXTRA_CONF_FILES, but with absolute path.
-declare -a -g _Q_PLUGIN_EXTRA_CONF_FILES_ABS
-
-
-Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf
-if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-    Q_RR_COMMAND="sudo"
-else
-    NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron)
-    Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
-    fi
-fi
-
-
-# Distributed Virtual Router (DVR) configuration
-# Can be:
-# - ``legacy``   - No DVR functionality
-# - ``dvr_snat`` - Controller or single node DVR
-# - ``dvr``      - Compute node in multi-node DVR
-#
-Q_DVR_MODE=${Q_DVR_MODE:-legacy}
-if [[ "$Q_DVR_MODE" != "legacy" ]]; then
-    Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,l2population
-fi
-
-# Provider Network Configurations
-# --------------------------------
-
-# The following variables control the Neutron ML2 plugins' allocation
-# of tenant networks and availability of provider networks. If these
-# are not configured in ``localrc``, tenant networks will be local to
-# the host (with no remote connectivity), and no physical resources
-# will be available for the allocation of provider networks.
-
-# To disable tunnels (GRE or VXLAN) for tenant networks,
-# set to False in ``local.conf``.
-# GRE tunnels are only supported by the openvswitch.
-ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
-
-# If using GRE, VXLAN or GENEVE tunnels for tenant networks,
-# specify the range of IDs from which tenant networks are
-# allocated. Can be overridden in ``localrc`` if necessary.
-TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
-
-# To use VLANs for tenant networks, set to True in localrc. VLANs
-# are supported by the ML2 plugins, requiring additional configuration
-# described below.
-ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
-
-# If using VLANs for tenant networks, set in ``localrc`` to specify
-# the range of VLAN VIDs from which tenant networks are
-# allocated. An external network switch must be configured to
-# trunk these VLANs between hosts for multi-host connectivity.
-#
-# Example: ``TENANT_VLAN_RANGE=1000:1999``
-TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
-
-# If using VLANs for tenant networks, or if using flat or VLAN
-# provider networks, set in ``localrc`` to the name of the physical
-# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
-# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
-# agent, as described below.
-#
-# Example: ``PHYSICAL_NETWORK=default``
-PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-public}
-
-# With the openvswitch agent, if using VLANs for tenant networks,
-# or if using flat or VLAN provider networks, set in ``localrc`` to
-# the name of the OVS bridge to use for the physical network. The
-# bridge will be created if it does not already exist, but a
-# physical interface must be manually added to the bridge as a
-# port for external connectivity.
-#
-# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1``
-OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-br-ex}
-
-# With the linuxbridge agent, if using VLANs for tenant networks,
-# or if using flat or VLAN provider networks, set in ``localrc`` to
-# the name of the network interface to use for the physical
-# network.
-#
-# Example: ``LB_PHYSICAL_INTERFACE=eth1``
-if [[ $Q_AGENT == "linuxbridge" && -z ${LB_PHYSICAL_INTERFACE} ]]; then
-    default_route_dev=$( (ip route; ip -6 route) | grep ^default | head -n 1 | awk '{print $5}')
-    die_if_not_set $LINENO default_route_dev "Failure retrieving default route device"
-    LB_PHYSICAL_INTERFACE=$default_route_dev
-fi
-
-# When Neutron tunnels are enabled it is needed to specify the
-# IP address of the end point in the local server. This IP is set
-# by default to the same IP address that the HOST IP.
-# This variable can be used to specify a different end point IP address
-# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1``
-TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP}
-
-# With the openvswitch plugin, set to True in ``localrc`` to enable
-# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False.
-#
-# Example: ``OVS_ENABLE_TUNNELING=True``
-OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS}
-
-# Use DHCP agent for providing metadata service in the case of
-# without L3 agent (No Route Agent), set to True in localrc.
-ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False}
-
-# Add a static route as dhcp option, so the request to 169.254.169.254
-# will be able to reach through a route(DHCP agent)
-# This option require ENABLE_ISOLATED_METADATA = True
-ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False}
-# Neutron plugin specific functions
-# ---------------------------------
-
-# Please refer to ``lib/neutron_plugins/README.md`` for details.
-if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then
-    source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN
-fi
-
-# Agent metering service plugin functions
-# -------------------------------------------
-
-# Hardcoding for 1 service plugin for now
-source $TOP_DIR/lib/neutron_plugins/services/metering
-
-# L3 Service functions
-source $TOP_DIR/lib/neutron_plugins/services/l3
-
-# Additional Neutron service plugins
-source $TOP_DIR/lib/neutron_plugins/services/placement
-source $TOP_DIR/lib/neutron_plugins/services/trunk
-source $TOP_DIR/lib/neutron_plugins/services/qos
-
-# Use security group or not
-if has_neutron_plugin_security_group; then
-    Q_USE_SECGROUP=${Q_USE_SECGROUP:-True}
-else
-    Q_USE_SECGROUP=False
-fi
-
-# Save trace setting
-_XTRACE_NEUTRON=$(set +o | grep xtrace)
-set +o xtrace
-
-
-# Functions
-# ---------
-
-function _determine_config_server {
-    if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" != '' ]]; then
-        if [[ "$Q_PLUGIN_EXTRA_CONF_PATH" = "$_Q_PLUGIN_EXTRA_CONF_PATH" ]]; then
-            deprecated "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
-        else
-            die $LINENO "Q_PLUGIN_EXTRA_CONF_PATH is deprecated"
-        fi
-    fi
-    if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 ]]; then
-        deprecated "Q_PLUGIN_EXTRA_CONF_FILES is deprecated.  Use neutron_server_config_add instead."
-    fi
-    for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do
-        _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($_Q_PLUGIN_EXTRA_CONF_PATH/$cfg_file)
-    done
-
-    local cfg_file
-    local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-    for cfg_file in ${_Q_PLUGIN_EXTRA_CONF_FILES_ABS[@]}; do
-        opts+=" --config-file $cfg_file"
-    done
-    echo "$opts"
-}
-
-function _determine_config_l3 {
-    local opts="--config-file $NEUTRON_CONF --config-file $Q_L3_CONF_FILE"
-    echo "$opts"
-}
-
-# For services and agents that require it, dynamically construct a list of
-# --config-file arguments that are passed to the binary.
-function determine_config_files {
-    local opts=""
-    case "$1" in
-        "neutron-server") opts="$(_determine_config_server)" ;;
-        "neutron-l3-agent") opts="$(_determine_config_l3)" ;;
-    esac
-    if [ -z "$opts" ] ; then
-        die $LINENO "Could not determine config files for $1."
-    fi
-    echo "$opts"
-}
-
-# configure_mutnauq()
-# Set common config for all neutron server and agents.
-function configure_mutnauq {
-    _configure_neutron_common
-    iniset_rpc_backend neutron $NEUTRON_CONF
-
-    if is_service_enabled q-metering; then
-        _configure_neutron_metering
-    fi
-    if is_service_enabled q-agt q-svc; then
-        _configure_neutron_service
-    fi
-    if is_service_enabled q-agt; then
-        _configure_neutron_plugin_agent
-    fi
-    if is_service_enabled q-dhcp; then
-        _configure_neutron_dhcp_agent
-    fi
-    if is_service_enabled q-l3; then
-        _configure_neutron_l3_agent
-    fi
-    if is_service_enabled q-meta; then
-        _configure_neutron_metadata_agent
-    fi
-
-    if [[ "$Q_DVR_MODE" != "legacy" ]]; then
-        _configure_dvr
-    fi
-    if is_service_enabled ceilometer; then
-        _configure_neutron_ceilometer_notifications
-    fi
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        configure_ovn
-        configure_ovn_plugin
-    fi
-
-    # Configure Neutron's advanced services
-    if is_service_enabled q-placement neutron-placement; then
-        configure_placement_extension
-    fi
-    if is_service_enabled q-trunk neutron-trunk; then
-        configure_trunk_extension
-    fi
-    if is_service_enabled q-qos neutron-qos; then
-        configure_qos
-        if is_service_enabled q-l3 neutron-l3; then
-            configure_l3_agent_extension_fip_qos
-            configure_l3_agent_extension_gateway_ip_qos
-        fi
-    fi
-
-    iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS"
-    # devstack is not a tool for running uber scale OpenStack
-    # clouds, therefore running without a dedicated RPC worker
-    # for state reports is more than adequate.
-    iniset $NEUTRON_CONF DEFAULT rpc_state_report_workers 0
-}
-
-function create_nova_conf_neutron {
-    local conf=${1:-$NOVA_CONF}
-    iniset $conf neutron auth_type "password"
-    iniset $conf neutron auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf neutron username "$Q_ADMIN_USERNAME"
-    iniset $conf neutron password "$SERVICE_PASSWORD"
-    iniset $conf neutron user_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $conf neutron project_name "$SERVICE_PROJECT_NAME"
-    iniset $conf neutron project_domain_name "$SERVICE_DOMAIN_NAME"
-    iniset $conf neutron auth_strategy "$Q_AUTH_STRATEGY"
-    iniset $conf neutron region_name "$REGION_NAME"
-
-    # optionally set options in nova_conf
-    neutron_plugin_create_nova_conf $conf
-
-    if is_service_enabled q-meta; then
-        iniset $conf neutron service_metadata_proxy "True"
-    fi
-
-    iniset $conf DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL"
-    iniset $conf DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT"
-}
-
-# create_mutnauq_accounts() - Set up common required neutron accounts
-
-# Tenant               User       Roles
-# ------------------------------------------------------------------
-# service              neutron    admin        # if enabled
-
-# Migrated from keystone_data.sh
-function create_mutnauq_accounts {
-    local neutron_url
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        neutron_url=$Q_PROTOCOL://$SERVICE_HOST/networking/
-    else
-        neutron_url=$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/
-    fi
-
-    if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then
-
-        create_service_user "neutron"
-
-        get_or_create_service "neutron" "network" "Neutron Service"
-        get_or_create_endpoint \
-            "network" \
-            "$REGION_NAME" "$neutron_url"
-    fi
-}
-
-# init_mutnauq() - Initialize databases, etc.
-function init_mutnauq {
-    recreate_database $Q_DB_NAME
-    time_start "dbsync"
-    # Run Neutron db migrations
-    $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head
-    time_stop "dbsync"
-}
-
-# install_mutnauq() - Collect source and prepare
-function install_mutnauq {
-    # Install neutron-lib from git so we make sure we're testing
-    # the latest code.
-    if use_library_from_git "neutron-lib"; then
-        git_clone_by_name "neutron-lib"
-        setup_dev_lib "neutron-lib"
-    fi
-
-    git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH
-    setup_develop $NEUTRON_DIR
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        install_ovn
-    fi
-}
-
-# install_neutron_agent_packages() - Collect source and prepare
-function install_neutron_agent_packages_mutnauq {
-    # radvd doesn't come with the OS. Install it if the l3 service is enabled.
-    if is_service_enabled q-l3; then
-        install_package radvd
-    fi
-    # install packages that are specific to plugin agent(s)
-    if is_service_enabled q-agt q-dhcp q-l3; then
-        neutron_plugin_install_agent_packages
-    fi
-}
-
-# Finish neutron configuration
-function configure_neutron_after_post_config {
-    if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then
-        iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES
-    fi
-    configure_rbac_policies
-}
-
-# configure_rbac_policies() - Configure Neutron to enforce new RBAC
-# policies and scopes if NEUTRON_ENFORCE_SCOPE == True
-function configure_rbac_policies {
-    if [ "$NEUTRON_ENFORCE_SCOPE" == "True" ]; then
-        iniset $NEUTRON_CONF oslo_policy enforce_new_defaults True
-        iniset $NEUTRON_CONF oslo_policy enforce_scope True
-    else
-        iniset $NEUTRON_CONF oslo_policy enforce_new_defaults False
-        iniset $NEUTRON_CONF oslo_policy enforce_scope False
-    fi
-}
-
-# Start running OVN processes
-function start_ovn_services {
-    if [[ $Q_AGENT == "ovn" ]]; then
-        init_ovn
-        start_ovn
-        if [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]]; then
-            if [[ "$NEUTRON_CREATE_INITIAL_NETWORKS" != "True" ]]; then
-                echo "OVN_L3_CREATE_PUBLIC_NETWORK=True is being ignored "
-                echo "because NEUTRON_CREATE_INITIAL_NETWORKS is set to False"
-            else
-                create_public_bridge
-            fi
-        fi
-    fi
-}
-
-# Start running processes
-function start_neutron_service_and_check {
-    local service_port=$Q_PORT
-    local service_protocol=$Q_PROTOCOL
-    local cfg_file_options
-    local neutron_url
-
-    cfg_file_options="$(determine_config_files neutron-server)"
-
-    if is_service_enabled tls-proxy; then
-        service_port=$Q_PORT_INT
-        service_protocol="http"
-    fi
-    # Start the Neutron service
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        enable_service neutron-api
-        run_process neutron-api "$(which uwsgi) --procname-prefix neutron-api --ini $NEUTRON_UWSGI_CONF"
-        neutron_url=$Q_PROTOCOL://$Q_HOST/networking/
-        enable_service neutron-rpc-server
-        run_process neutron-rpc-server "$NEUTRON_BIN_DIR/neutron-rpc-server $cfg_file_options"
-    else
-        run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options"
-        neutron_url=$service_protocol://$Q_HOST:$service_port
-        # Start proxy if enabled
-        if is_service_enabled tls-proxy; then
-            start_tls_proxy neutron '*' $Q_PORT $Q_HOST $Q_PORT_INT
-        fi
-    fi
-    echo "Waiting for Neutron to start..."
-
-    local testcmd="wget ${ssl_ca} --no-proxy -q -O- $neutron_url"
-    test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT
-}
-
-# Control of the l2 agent is separated out to make it easier to test partial
-# upgrades (everything upgraded except the L2 agent)
-function start_mutnauq_l2_agent {
-    run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
-
-    if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then
-        sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE
-        sudo ip link set $OVS_PHYSICAL_BRIDGE up
-        sudo ip link set br-int up
-        sudo ip link set $PUBLIC_INTERFACE up
-        if is_ironic_hardware; then
-            for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do
-                sudo ip addr del $IP dev $PUBLIC_INTERFACE
-                sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE
-            done
-            sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
-        fi
-    fi
-}
-
-function start_mutnauq_other_agents {
-    run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file $Q_DHCP_CONF_FILE"
-
-    run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)"
-
-    run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file $Q_META_CONF_FILE"
-    run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME"
-}
-
-# Start running processes, including screen
-function start_neutron_agents {
-    # Start up the neutron agents if enabled
-    start_mutnauq_l2_agent
-    start_mutnauq_other_agents
-}
-
-function stop_mutnauq_l2_agent {
-    stop_process q-agt
-}
-
-# stop_mutnauq_other() - Stop running processes
-function stop_mutnauq_other {
-    if is_service_enabled q-dhcp; then
-        stop_process q-dhcp
-        pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }')
-        [ ! -z "$pid" ] && sudo kill -9 $pid
-    fi
-
-    if [ "$NEUTRON_DEPLOY_MOD_WSGI" == "True" ]; then
-        stop_process neutron-rpc-server
-        stop_process neutron-api
-    else
-        stop_process q-svc
-    fi
-
-    if is_service_enabled q-l3; then
-        sudo pkill -f "radvd -C $DATA_DIR/neutron/ra"
-        stop_process q-l3
-    fi
-
-    if is_service_enabled q-meta; then
-        sudo pkill -9 -f neutron-ns-metadata-proxy || :
-        stop_process q-meta
-    fi
-
-    if is_service_enabled q-metering; then
-        neutron_metering_stop
-    fi
-
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        sudo pkill -9 -f $NEUTRON_ROOTWRAP-daemon || :
-    fi
-}
-
-# stop_neutron() - Stop running processes (non-screen)
-function stop_mutnauq {
-    stop_mutnauq_other
-    stop_mutnauq_l2_agent
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        stop_ovn
-    fi
-}
-
-# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge
-# on startup, or back to the public interface on cleanup. If no IP is
-# configured on the interface, just add it as a port to the OVS bridge.
-function _move_neutron_addresses_route {
-    local from_intf=$1
-    local to_intf=$2
-    local add_ovs_port=$3
-    local del_ovs_port=$4
-    local af=$5
-
-    if [[ -n "$from_intf" && -n "$to_intf" ]]; then
-        # Remove the primary IP address from $from_intf and add it to $to_intf,
-        # along with the default route, if it exists.  Also, when called
-        # on configure we will also add $from_intf as a port on $to_intf,
-        # assuming it is an OVS bridge.
-
-        local IP_REPLACE=""
-        local IP_DEL=""
-        local IP_UP=""
-        local DEFAULT_ROUTE_GW
-        DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf\s/ { print \$3; exit }")
-        local ADD_OVS_PORT=""
-        local DEL_OVS_PORT=""
-        local ARP_CMD=""
-
-        IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }')
-
-        if [ "$DEFAULT_ROUTE_GW" != "" ]; then
-            ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf"
-        fi
-
-        if [[ "$add_ovs_port" == "True" ]]; then
-            ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf"
-        fi
-
-        if [[ "$del_ovs_port" == "True" ]]; then
-            DEL_OVS_PORT="sudo ovs-vsctl --if-exists del-port $from_intf $to_intf"
-        fi
-
-        if [[ "$IP_BRD" != "" ]]; then
-            IP_DEL="sudo ip addr del $IP_BRD dev $from_intf"
-            IP_REPLACE="sudo ip addr replace $IP_BRD dev $to_intf"
-            IP_UP="sudo ip link set $to_intf up"
-            if [[ "$af" == "inet" ]]; then
-                IP=$(echo $IP_BRD | awk '{ print $1; exit }' | grep -o -E '(.*)/' | cut -d "/" -f1)
-                ARP_CMD="sudo arping -A -c 3 -w 5 -I $to_intf $IP "
-            fi
-        fi
-
-        # The add/del OVS port calls have to happen either before or
-        # after the address is moved in order to not leave it orphaned.
-        $DEL_OVS_PORT; $IP_DEL; $IP_REPLACE; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE; $ARP_CMD
-    fi
-}
-
-# _configure_public_network_connectivity() - Configures connectivity to the
-# external network using $PUBLIC_INTERFACE or NAT on the single interface
-# machines
-function _configure_public_network_connectivity {
-    # If we've given a PUBLIC_INTERFACE to take over, then we assume
-    # that we can own the whole thing, and privot it into the OVS
-    # bridge. If we are not, we're probably on a single interface
-    # machine, and we just setup NAT so that fixed guests can get out.
-    if [[ -n "$PUBLIC_INTERFACE" ]]; then
-        _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True False "inet"
-
-        if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then
-            _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False False "inet6"
-        fi
-    else
-        for d in $default_v4_route_devs; do
-            sudo iptables -t nat -A POSTROUTING -o $d -s $FLOATING_RANGE -j MASQUERADE
-        done
-    fi
-}
-
-# cleanup_mutnauq() - Remove residual data files, anything left over from previous
-# runs that a clean run would need to clean up
-function cleanup_mutnauq {
-
-    if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then
-        _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False True "inet"
-
-        if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then
-            # ip(8) wants the prefix length when deleting
-            local v6_gateway
-            v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }')
-            sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE
-            _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False False "inet6"
-        fi
-
-        if is_provider_network && is_ironic_hardware; then
-            for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do
-                sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE
-                sudo ip addr add $IP dev $PUBLIC_INTERFACE
-            done
-            sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE
-        fi
-    fi
-
-    if is_neutron_ovs_base_plugin; then
-        neutron_ovs_base_cleanup
-    fi
-
-    if [[ $Q_AGENT == "linuxbridge" ]]; then
-        neutron_lb_cleanup
-    fi
-
-    # delete all namespaces created by neutron
-    for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do
-        sudo ip netns delete ${ns}
-    done
-
-    if [[ $Q_AGENT == "ovn" ]]; then
-        cleanup_ovn
-    fi
-}
-
-
-function _create_neutron_conf_dir {
-    # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find
-    sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
-}
-
-# _configure_neutron_common()
-# Set common config for all neutron server and agents.
-# This MUST be called before other ``_configure_neutron_*`` functions.
-function _configure_neutron_common {
-    _create_neutron_conf_dir
-
-    # Uses oslo config generator to generate core sample configuration files
-    (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
-
-    cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF
-
-    Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json
-
-    # allow neutron user to administer neutron to match neutron account
-    # NOTE(amotoki): This is required for nova works correctly with neutron.
-    if [ -f $NEUTRON_DIR/etc/policy.json ]; then
-        cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE
-        sed -i 's/"context_is_admin":  "role:admin"/"context_is_admin":  "role:admin or user_name:neutron"/g' $Q_POLICY_FILE
-    else
-        echo '{"context_is_admin":  "role:admin or user_name:neutron"}' > $Q_POLICY_FILE
-    fi
-
-    # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``.
-    # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``.
-    neutron_plugin_configure_common
-
-    if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then
-        die $LINENO "Neutron plugin not set.. exiting"
-    fi
-
-    # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR``
-    mkdir -p /$Q_PLUGIN_CONF_PATH
-    Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME
-    # NOTE(hichihara): Some neutron vendor plugins were already decomposed and
-    # there is no config file in Neutron tree. They should prepare the file in each plugin.
-    if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then
-        cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE
-    elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then
-        cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE
-    fi
-
-    iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME`
-    iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron
-    iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG
-    iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS
-    iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock
-
-    # NOTE(freerunner): Need to adjust Region Name for nova in multiregion installation
-    iniset $NEUTRON_CONF nova region_name $REGION_NAME
-
-    if [ "$VIRT_DRIVER" = 'fake' ]; then
-        # Disable arbitrary limits
-        iniset $NEUTRON_CONF quotas quota_network -1
-        iniset $NEUTRON_CONF quotas quota_subnet -1
-        iniset $NEUTRON_CONF quotas quota_port -1
-        iniset $NEUTRON_CONF quotas quota_security_group -1
-        iniset $NEUTRON_CONF quotas quota_security_group_rule -1
-    fi
-
-    # Format logging
-    setup_logging $NEUTRON_CONF
-
-    if is_service_enabled tls-proxy && [ "$NEUTRON_DEPLOY_MOD_WSGI" == "False" ]; then
-        # Set the service port for a proxy to take the original
-        iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT"
-        iniset $NEUTRON_CONF oslo_middleware enable_proxy_headers_parsing True
-    fi
-
-    _neutron_setup_rootwrap
-}
-
-function _configure_neutron_dhcp_agent {
-
-    cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE
-
-    iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    # make it so we have working DNS from guests
-    iniset $Q_DHCP_CONF_FILE DEFAULT dnsmasq_local_resolv True
-    iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-
-    if ! is_service_enabled q-l3; then
-        if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then
-            iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA
-            iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK
-        else
-            if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then
-                die "$LINENO" "Enable isolated metadata is a must for metadata network"
-            fi
-        fi
-    fi
-
-    _neutron_setup_interface_driver $Q_DHCP_CONF_FILE
-
-    neutron_plugin_configure_dhcp_agent $Q_DHCP_CONF_FILE
-}
-
-
-function _configure_neutron_metadata_agent {
-    cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE
-
-    iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $Q_META_CONF_FILE DEFAULT nova_metadata_host $Q_META_DATA_IP
-    iniset $Q_META_CONF_FILE DEFAULT metadata_workers $API_WORKERS
-    iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-}
-
-function _configure_neutron_ceilometer_notifications {
-    iniset $NEUTRON_CONF oslo_messaging_notifications driver messagingv2
-}
-
-function _configure_neutron_metering {
-    neutron_agent_metering_configure_common
-    neutron_agent_metering_configure_agent
-}
-
-function _configure_dvr {
-    iniset $NEUTRON_CONF DEFAULT router_distributed True
-    iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE
-}
-
-
-# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent
-# It is called when q-agt is enabled.
-function _configure_neutron_plugin_agent {
-    # Specify the default root helper prior to agent configuration to
-    # ensure that an agent's configuration can override the default
-    iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE  agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-
-    # Configure agent for plugin
-    neutron_plugin_configure_plugin_agent
-}
-
-# _configure_neutron_service() - Set config files for neutron service
-# It is called when q-svc is enabled.
-function _configure_neutron_service {
-    Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini
-    cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE
-
-    # Update either configuration file with plugin
-    iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS
-
-    iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-    iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE
-    iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP
-
-    iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY
-    configure_keystone_authtoken_middleware $NEUTRON_CONF $Q_ADMIN_USERNAME
-
-    # Configuration for neutron notifications to nova.
-    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES
-    iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES
-
-    configure_keystone_authtoken_middleware $NEUTRON_CONF nova nova
-
-    # Configure plugin
-    neutron_plugin_configure_service
-}
-
-# Utility Functions
-#------------------
-
-# _neutron_service_plugin_class_add() - add service plugin class
-function _neutron_service_plugin_class_add {
-    local service_plugin_class=$1
-    if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then
-        Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class
-    elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then
-        Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class"
-    fi
-}
-
-# _neutron_ml2_extension_driver_add_old() - add ML2 extension driver
-function _neutron_ml2_extension_driver_add_old {
-    local extension=$1
-    if [[ $Q_ML2_PLUGIN_EXT_DRIVERS == '' ]]; then
-        Q_ML2_PLUGIN_EXT_DRIVERS=$extension
-    elif [[ ! ,${Q_ML2_PLUGIN_EXT_DRIVERS}, =~ ,${extension}, ]]; then
-        Q_ML2_PLUGIN_EXT_DRIVERS="$Q_ML2_PLUGIN_EXT_DRIVERS,$extension"
-    fi
-}
-
-# mutnauq_server_config_add() - add server config file
-function mutnauq_server_config_add {
-    _Q_PLUGIN_EXTRA_CONF_FILES_ABS+=($1)
-}
-
-# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root).
-function _neutron_deploy_rootwrap_filters {
-    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-        return
-    fi
-    local srcdir=$1
-    sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D
-    sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/
-}
-
-# _neutron_setup_rootwrap() - configure Neutron's rootwrap
-function _neutron_setup_rootwrap {
-    if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then
-        return
-    fi
-    # Wipe any existing ``rootwrap.d`` files first
-    Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d
-    if [[ -d $Q_CONF_ROOTWRAP_D ]]; then
-        sudo rm -rf $Q_CONF_ROOTWRAP_D
-    fi
-
-    _neutron_deploy_rootwrap_filters $NEUTRON_DIR
-
-    # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d``
-    # location moved in newer versions, prefer new location
-    if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then
-        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE
-    else
-        sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE
-    fi
-    sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE
-    sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE
-
-    # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap
-    ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *"
-    ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE"
-
-    # Set up the rootwrap sudoers for neutron
-    TEMPFILE=`mktemp`
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE
-    echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE
-    chmod 0440 $TEMPFILE
-    sudo chown root:root $TEMPFILE
-    sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap
-
-    # Update the root_helper
-    iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND"
-    if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then
-        iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND"
-    fi
-}
-
-function _neutron_setup_interface_driver {
-
-    # ovs_use_veth needs to be set before the plugin configuration
-    # occurs to allow plugins to override the setting.
-    iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH
-
-    neutron_plugin_setup_interface_driver $1
-}
-# Functions for Neutron Exercises
-#--------------------------------
-
-function delete_probe {
-    local from_net="$1"
-    net_id=`_get_net_id $from_net`
-    probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'`
-    neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id
-}
-
-function _get_net_id {
-    openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" --os-project-name admin --os-username admin --os-password $ADMIN_PASSWORD network list | grep $1 | awk '{print $2}'
-}
-
-function _get_probe_cmd_prefix {
-    local from_net="$1"
-    net_id=`_get_net_id $from_net`
-    probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1`
-    echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id"
-}
-
-# ssh check
-function _ssh_check_neutron {
-    local from_net=$1
-    local key_file=$2
-    local ip=$3
-    local user=$4
-    local timeout_sec=$5
-    local probe_cmd = ""
-    probe_cmd=`_get_probe_cmd_prefix $from_net`
-    local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success"
-    test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec
-}
-
-function plugin_agent_add_l2_agent_extension {
-    local l2_agent_extension=$1
-    if [[ -z "$L2_AGENT_EXTENSIONS" ]]; then
-        L2_AGENT_EXTENSIONS=$l2_agent_extension
-    elif [[ ! ,${L2_AGENT_EXTENSIONS}, =~ ,${l2_agent_extension}, ]]; then
-        L2_AGENT_EXTENSIONS+=",$l2_agent_extension"
-    fi
-}
-
-# Restore xtrace
-$_XTRACE_NEUTRON
-
-# Tell emacs to use shell-script-mode
-## Local variables:
-## mode: shell-script
-## End:
+source $TOP_DIR/lib/neutron
diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md
index ed40886..728aaee 100644
--- a/lib/neutron_plugins/README.md
+++ b/lib/neutron_plugins/README.md
@@ -13,7 +13,7 @@
 
 functions
 ---------
-``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled
+``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled
 
 * ``neutron_plugin_create_nova_conf`` :
   optionally set options in nova_conf
diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight
index d3f5bd5..84ca7ec 100644
--- a/lib/neutron_plugins/bigswitch_floodlight
+++ b/lib/neutron_plugins/bigswitch_floodlight
@@ -67,7 +67,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade
index 310b72e..9640063 100644
--- a/lib/neutron_plugins/brocade
+++ b/lib/neutron_plugins/brocade
@@ -72,7 +72,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent
index bdeaf0f..a392bd0 100644
--- a/lib/neutron_plugins/linuxbridge_agent
+++ b/lib/neutron_plugins/linuxbridge_agent
@@ -97,7 +97,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2
index f00feac..c2e78c6 100644
--- a/lib/neutron_plugins/ml2
+++ b/lib/neutron_plugins/ml2
@@ -67,7 +67,7 @@
     Q_PLUGIN_CLASS="ml2"
     # The ML2 plugin delegates L3 routing/NAT functionality to
     # the L3 service plugin which must therefore be specified.
-    _neutron_service_plugin_class_add $ML2_L3_PLUGIN
+    neutron_service_plugin_class_add $ML2_L3_PLUGIN
 }
 
 function neutron_plugin_configure_service {
@@ -111,20 +111,13 @@
             fi
         fi
     fi
-    # REVISIT(rkukura): Setting firewall_driver here for
-    # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is
-    # used in the server, in case no L2 agent is configured on the
-    # server's node. If an L2 agent is configured, this will get
-    # overridden with the correct driver. The ml2 plugin should
-    # instead use its own config variable to indicate whether security
-    # groups is enabled, and that will need to be set here instead.
-    if [[ "$Q_USE_SECGROUP" == "True" ]]; then
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.not.a.real.FirewallDriver
-    else
-        iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver
-    fi
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group=$Q_USE_SECGROUP
 
     populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS
+    if [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" == *"linuxbridge"* ]]; then
+        iniset $NEUTRON_CONF experimental linuxbridge True
+    fi
+    populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 overlay_ip_version=$TUNNEL_IP_VERSION
 
     if [[ -n "$Q_ML2_PLUGIN_TYPE_DRIVERS" ]]; then
         populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS
diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent
index 7fed8bf..6e79984 100644
--- a/lib/neutron_plugins/openvswitch_agent
+++ b/lib/neutron_plugins/openvswitch_agent
@@ -68,7 +68,7 @@
 }
 
 function neutron_plugin_check_adv_test_requirements {
-    is_service_enabled q-agt && is_service_enabled q-dhcp && return 0
+    is_service_enabled q-agt neutron-agent && is_service_enabled q-dhcp neutron-dhcp && return 0
 }
 
 # Restore xtrace
diff --git a/lib/neutron_plugins/ovn_agent b/lib/neutron_plugins/ovn_agent
index bf1b23a..be3a9e7 100644
--- a/lib/neutron_plugins/ovn_agent
+++ b/lib/neutron_plugins/ovn_agent
@@ -91,16 +91,23 @@
 # http://www.openvswitch.org/support/dist-docs/ovs-appctl.8.txt
 OVN_DBS_LOG_LEVEL=${OVN_DBS_LOG_LEVEL:-info}
 
+# OVN metadata agent configuration
 OVN_META_CONF=$NEUTRON_CONF_DIR/neutron_ovn_metadata_agent.ini
 OVN_META_DATA_HOST=${OVN_META_DATA_HOST:-$(ipv6_unquote $SERVICE_HOST)}
 
+# OVN agent configuration
+OVN_AGENT_CONF=$NEUTRON_CONF_DIR/plugins/ml2/ovn_agent.ini
+OVN_AGENT_EXTENSIONS=${OVN_AGENT_EXTENSIONS:-}
+
 # If True (default) the node will be considered a gateway node.
 ENABLE_CHASSIS_AS_GW=$(trueorfalse True ENABLE_CHASSIS_AS_GW)
 OVN_L3_CREATE_PUBLIC_NETWORK=$(trueorfalse True OVN_L3_CREATE_PUBLIC_NETWORK)
 
 export OVSDB_SERVER_LOCAL_HOST=$SERVICE_LOCAL_HOST
+TUNNEL_IP=$TUNNEL_ENDPOINT_IP
 if [[ "$SERVICE_IP_VERSION" == 6 ]]; then
     OVSDB_SERVER_LOCAL_HOST=[$OVSDB_SERVER_LOCAL_HOST]
+    TUNNEL_IP=[$TUNNEL_IP]
 fi
 
 OVN_IGMP_SNOOPING_ENABLE=$(trueorfalse False OVN_IGMP_SNOOPING_ENABLE)
@@ -130,6 +137,7 @@
 
 NEUTRON_OVN_BIN_DIR=$(get_python_exec_prefix)
 NEUTRON_OVN_METADATA_BINARY="neutron-ovn-metadata-agent"
+NEUTRON_OVN_AGENT_BINARY="neutron-ovn-agent"
 
 STACK_GROUP="$( id --group --name "$STACK_USER" )"
 
@@ -169,12 +177,23 @@
 # Utility Functions
 # -----------------
 
+function wait_for_db_file {
+    local count=0
+    while [ ! -f $1 ]; do
+        sleep 1
+        count=$((count+1))
+        if [ "$count" -gt 40 ]; then
+            die $LINENO "DB File $1 not found"
+        fi
+    done
+}
+
 function wait_for_sock_file {
     local count=0
     while [ ! -S $1 ]; do
         sleep 1
         count=$((count+1))
-        if [ "$count" -gt 5 ]; then
+        if [ "$count" -gt 40 ]; then
             die $LINENO "Socket $1 not found"
         fi
     done
@@ -231,11 +250,12 @@
     local cmd="$2"
     local stop_cmd="$3"
     local group=$4
-    local user=${5:-$STACK_USER}
+    local user=$5
+    local rundir=${6:-$OVS_RUNDIR}
 
     local systemd_service="devstack@$service.service"
     local unit_file="$SYSTEMD_DIR/$systemd_service"
-    local environment="OVN_RUNDIR=$OVS_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
+    local environment="OVN_RUNDIR=$OVN_RUNDIR OVN_DBDIR=$OVN_DATADIR OVN_LOGDIR=$LOGDIR OVS_RUNDIR=$OVS_RUNDIR OVS_DBDIR=$OVS_DATADIR OVS_LOGDIR=$LOGDIR"
 
     echo "Starting $service executed command": $cmd
 
@@ -251,14 +271,14 @@
 
     _start_process $systemd_service
 
-    local testcmd="test -e $OVS_RUNDIR/$service.pid"
+    local testcmd="test -e $rundir/$service.pid"
     test_with_retry "$testcmd" "$service did not start" $SERVICE_TIMEOUT 1
     local service_ctl_file
-    service_ctl_file=$(ls $OVS_RUNDIR | grep $service | grep ctl)
+    service_ctl_file=$(ls $rundir | grep $service | grep ctl)
     if [ -z "$service_ctl_file" ]; then
         die $LINENO "ctl file for service $service is not present."
     fi
-    sudo ovs-appctl -t $OVS_RUNDIR/$service_ctl_file vlog/set console:off syslog:info file:info
+    sudo ovs-appctl -t $rundir/$service_ctl_file vlog/set console:off syslog:info file:info
 }
 
 function clone_repository {
@@ -274,7 +294,7 @@
 function create_public_bridge {
     # Create the public bridge that OVN will use
     sudo ovs-vsctl --may-exist add-br $PUBLIC_BRIDGE -- set bridge $PUBLIC_BRIDGE protocols=OpenFlow13,OpenFlow15
-    sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=$PHYSICAL_NETWORK:$PUBLIC_BRIDGE
+    sudo ovs-vsctl set open . external-ids:ovn-bridge-mappings=${OVN_BRIDGE_MAPPINGS}
     _configure_public_network_connectivity
 }
 
@@ -350,7 +370,7 @@
 
 # OVN service sanity check
 function ovn_sanity_check {
-    if is_service_enabled q-agt neutron-agt; then
+    if is_service_enabled q-agt neutron-agent; then
         die $LINENO "The q-agt/neutron-agt service must be disabled with OVN."
     elif is_service_enabled q-l3 neutron-l3; then
         die $LINENO "The q-l3/neutron-l3 service must be disabled with OVN."
@@ -373,10 +393,6 @@
 
     sudo mkdir -p $OVS_RUNDIR
     sudo chown $(whoami) $OVS_RUNDIR
-    # NOTE(lucasagomes): To keep things simpler, let's reuse the same
-    # RUNDIR for both OVS and OVN. This way we avoid having to specify the
-    # --db option in the ovn-{n,s}bctl commands while playing with DevStack
-    sudo ln -s $OVS_RUNDIR $OVN_RUNDIR
 
     if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
         # If OVS is already installed, remove it, because we're about to
@@ -400,6 +416,8 @@
         sudo mkdir -p $OVS_PREFIX/var/log/ovn
         sudo chown $(whoami) $OVS_PREFIX/var/log/ovn
     else
+        # Load fixup_ovn_centos
+        source ${TOP_DIR}/tools/fixup_stuff.sh
         fixup_ovn_centos
         install_package $(get_packages openvswitch)
         install_package $(get_packages ovn)
@@ -465,7 +483,7 @@
 function configure_ovn_plugin {
     echo "Configuring Neutron for OVN"
 
-    if is_service_enabled q-svc ; then
+    if is_service_enabled q-svc neutron-api; then
         filter_network_api_extensions
         populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_geneve max_header_size=$OVN_GENEVE_OVERHEAD
         populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_nb_connection="$OVN_NB_REMOTE"
@@ -489,7 +507,9 @@
             inicomment /$Q_PLUGIN_CONF_FILE network_log local_output_log_base="$Q_LOG_DRIVER_LOG_BASE"
         fi
 
-        if is_service_enabled q-ovn-metadata-agent; then
+        if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+            populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
+        elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=True
         else
             populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_metadata_enabled=False
@@ -510,7 +530,9 @@
     fi
 
     if is_service_enabled n-api-meta ; then
-        if is_service_enabled q-ovn-metadata-agent ; then
+        if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+            iniset $NOVA_CONF neutron service_metadata_proxy True
+        elif is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]]; then
             iniset $NOVA_CONF neutron service_metadata_proxy True
         fi
     fi
@@ -543,29 +565,42 @@
     fi
 
     # Metadata
-    if is_service_enabled q-ovn-metadata-agent && is_service_enabled ovn-controller; then
+    local sample_file=""
+    local config_file=""
+    if is_service_enabled q-ovn-agent neutron-ovn-agent && [[ "$OVN_AGENT_EXTENSIONS" =~ 'metadata' ]] && is_service_enabled ovn-controller; then
+        sample_file=$NEUTRON_DIR/etc/neutron/plugins/ml2/ovn_agent.ini.sample
+        config_file=$OVN_AGENT_CONF
+    elif is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent && is_service_enabled ovn-controller; then
+        sample_file=$NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample
+        config_file=$OVN_META_CONF
+    fi
+    if [ -n "$config_file" ]; then
         sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR
 
         mkdir -p $NEUTRON_DIR/etc/neutron/plugins/ml2
         (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh)
 
-        cp $NEUTRON_DIR/etc/neutron_ovn_metadata_agent.ini.sample $OVN_META_CONF
-        configure_root_helper_options $OVN_META_CONF
+        cp $sample_file $config_file
+        configure_root_helper_options $config_file
 
-        iniset $OVN_META_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
-        iniset $OVN_META_CONF DEFAULT nova_metadata_host $OVN_META_DATA_HOST
-        iniset $OVN_META_CONF DEFAULT metadata_workers $API_WORKERS
-        iniset $OVN_META_CONF DEFAULT state_path $NEUTRON_STATE_PATH
-        iniset $OVN_META_CONF ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
-        iniset $OVN_META_CONF ovn ovn_sb_connection $OVN_SB_REMOTE
+        iniset $config_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
+        iniset $config_file DEFAULT nova_metadata_host $OVN_META_DATA_HOST
+        iniset $config_file DEFAULT metadata_workers $API_WORKERS
+        iniset $config_file DEFAULT state_path $DATA_DIR/neutron
+        iniset $config_file ovs ovsdb_connection tcp:$OVSDB_SERVER_LOCAL_HOST:6640
+        iniset $config_file ovn ovn_sb_connection $OVN_SB_REMOTE
         if is_service_enabled tls-proxy; then
-            iniset $OVN_META_CONF ovn \
+            iniset $config_file ovn \
                 ovn_sb_ca_cert $INT_CA_DIR/ca-chain.pem
-            iniset $OVN_META_CONF ovn \
+            iniset $config_file ovn \
                 ovn_sb_certificate $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt
-            iniset $OVN_META_CONF ovn \
+            iniset $config_file ovn \
                 ovn_sb_private_key $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key
         fi
+        if [[ $config_file == $OVN_AGENT_CONF ]]; then
+            iniset $config_file agent extensions $OVN_AGENT_EXTENSIONS
+            iniset $config_file ovn ovn_nb_connection $OVN_NB_REMOTE
+        fi
     fi
 }
 
@@ -591,6 +626,7 @@
     rm -f $OVS_DATADIR/.*.db.~lock~
     sudo rm -f $OVN_DATADIR/*.db
     sudo rm -f $OVN_DATADIR/.*.db.~lock~
+    sudo rm -f $OVN_RUNDIR/*.sock
 }
 
 function _start_ovs {
@@ -617,12 +653,12 @@
                 dbcmd+=" --remote=db:hardware_vtep,Global,managers $OVS_DATADIR/vtep.db"
             fi
             dbcmd+=" $OVS_DATADIR/conf.db"
-            _run_process ovsdb-server "$dbcmd"
+            _run_process ovsdb-server "$dbcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
 
             # Note: ovn-controller will create and configure br-int once it is started.
             # So, no need to create it now because nothing depends on that bridge here.
             local ovscmd="$OVS_SBINDIR/ovs-vswitchd --log-file --pidfile --detach"
-            _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root"
+            _run_process ovs-vswitchd "$ovscmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
         else
             _start_process "$OVSDB_SERVER_SERVICE"
             _start_process "$OVS_VSWITCHD_SERVICE"
@@ -642,8 +678,8 @@
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-remote="$OVN_SB_REMOTE"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-bridge="br-int"
         sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-type="geneve"
-        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$HOST_IP"
-        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname="$LOCAL_HOSTNAME"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-encap-ip="$TUNNEL_IP"
+        sudo ovs-vsctl --no-wait set open_vswitch . external-ids:hostname=$(hostname)
         # Select this chassis to host gateway routers
         if [[ "$ENABLE_CHASSIS_AS_GW" == "True" ]]; then
             sudo ovs-vsctl --no-wait set open_vswitch . external-ids:ovn-cms-options="enable-chassis-as-gw"
@@ -657,11 +693,11 @@
         if is_service_enabled ovn-controller-vtep ; then
             ovn_base_setup_bridge br-v
             vtep-ctl add-ps br-v
-            vtep-ctl set Physical_Switch br-v tunnel_ips=$HOST_IP
+            vtep-ctl set Physical_Switch br-v tunnel_ips=$TUNNEL_IP
 
             enable_service ovs-vtep
             local vtepcmd="$OVS_SCRIPTDIR/ovs-vtep --log-file --pidfile --detach br-v"
-            _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root"
+            _run_process ovs-vtep "$vtepcmd" "" "$STACK_GROUP" "root" "$OVS_RUNDIR"
 
             vtep-ctl set-manager tcp:$HOST_IP:6640
         fi
@@ -684,9 +720,12 @@
     if is_service_enabled ovs-vtep ; then
         _start_process "devstack@ovs-vtep.service"
     fi
-    if is_service_enabled q-ovn-metadata-agent; then
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent ; then
         _start_process "devstack@q-ovn-metadata-agent.service"
     fi
+    if is_service_enabled q-ovn-agent neutron-ovn-agent ; then
+        _start_process "devstack@q-ovn-agent.service"
+    fi
 }
 
 # start_ovn() - Start running processes, including screen
@@ -705,23 +744,26 @@
             local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_northd"
             local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_northd"
 
-            _run_process ovn-northd "$cmd" "$stop_cmd"
+            _run_process ovn-northd "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
         else
             _start_process "$OVN_NORTHD_SERVICE"
         fi
 
         # Wait for the service to be ready
-        wait_for_sock_file $OVS_RUNDIR/ovnnb_db.sock
-        wait_for_sock_file $OVS_RUNDIR/ovnsb_db.sock
+        # Check for socket and db files for both OVN NB and SB
+        wait_for_sock_file $OVN_RUNDIR/ovnnb_db.sock
+        wait_for_sock_file $OVN_RUNDIR/ovnsb_db.sock
+        wait_for_db_file $OVN_DATADIR/ovnnb_db.db
+        wait_for_db_file $OVN_DATADIR/ovnsb_db.db
 
         if is_service_enabled tls-proxy; then
-            sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
-            sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+            sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
+            sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-ssl $INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key $INT_CA_DIR/$DEVSTACK_CERT_NAME.crt $INT_CA_DIR/ca-chain.pem
         fi
-        sudo ovn-nbctl --db=unix:$OVS_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
-        sudo ovn-sbctl --db=unix:$OVS_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
-        sudo ovs-appctl -t $OVS_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
-        sudo ovs-appctl -t $OVS_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+        sudo ovn-nbctl --db=unix:$OVN_RUNDIR/ovnnb_db.sock set-connection p${OVN_PROTO}:6641:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+        sudo ovn-sbctl --db=unix:$OVN_RUNDIR/ovnsb_db.sock set-connection p${OVN_PROTO}:6642:$SERVICE_LISTEN_ADDRESS -- set connection . inactivity_probe=60000
+        sudo ovs-appctl -t $OVN_RUNDIR/ovnnb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
+        sudo ovs-appctl -t $OVN_RUNDIR/ovnsb_db.ctl vlog/set console:off syslog:$OVN_DBS_LOG_LEVEL file:$OVN_DBS_LOG_LEVEL
     fi
 
     if is_service_enabled ovn-controller ; then
@@ -729,7 +771,7 @@
             local cmd="/bin/bash $SCRIPTDIR/ovn-ctl --no-monitor start_controller"
             local stop_cmd="/bin/bash $SCRIPTDIR/ovn-ctl stop_controller"
 
-            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root"
+            _run_process ovn-controller "$cmd" "$stop_cmd" "$STACK_GROUP" "root" "$OVN_RUNDIR"
         else
             _start_process "$OVN_CONTROLLER_SERVICE"
         fi
@@ -738,18 +780,24 @@
     if is_service_enabled ovn-controller-vtep ; then
         if [[ "$OVN_BUILD_FROM_SOURCE" == "True" ]]; then
             local cmd="$OVS_BINDIR/ovn-controller-vtep --log-file --pidfile --detach --ovnsb-db=$OVN_SB_REMOTE"
-            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root"
+            _run_process ovn-controller-vtep "$cmd" "" "$STACK_GROUP" "root" "$OVN_RUNDIR"
         else
             _start_process "$OVN_CONTROLLER_VTEP_SERVICE"
         fi
     fi
 
-    if is_service_enabled q-ovn-metadata-agent; then
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
         run_process q-ovn-metadata-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_METADATA_BINARY --config-file $OVN_META_CONF"
         # Format logging
         setup_logging $OVN_META_CONF
     fi
 
+    if is_service_enabled q-ovn-agent neutron-ovn-agent; then
+        run_process q-ovn-agent "$NEUTRON_OVN_BIN_DIR/$NEUTRON_OVN_AGENT_BINARY --config-file $OVN_AGENT_CONF"
+        # Format logging
+        setup_logging $OVN_AGENT_CONF
+    fi
+
     _start_ovn_services
 }
 
@@ -768,10 +816,18 @@
 }
 
 function stop_ovn {
-    if is_service_enabled q-ovn-metadata-agent; then
-        sudo pkill -9 -f haproxy || :
+    if is_service_enabled q-ovn-metadata-agent neutron-ovn-metadata-agent; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
         _stop_process "devstack@q-ovn-metadata-agent.service"
     fi
+    if is_service_enabled q-ovn-agent neutron-ovn-agent; then
+        # pkill takes care not to kill itself, but it may kill its parent
+        # sudo unless we use the "ps | grep [f]oo" trick
+        sudo pkill -9 -f "[h]aproxy" || :
+        _stop_process "devstack@q-ovn-agent.service"
+    fi
     if is_service_enabled ovn-controller-vtep ; then
         _stop_process "$OVN_CONTROLLER_VTEP_SERVICE"
     fi
@@ -814,5 +870,5 @@
         _cleanup $ovs_path
     fi
 
-    sudo rm -f $OVN_RUNDIR
+    sudo rm -rf $OVN_RUNDIR
 }
diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base
index cc41a8c..adabc56 100644
--- a/lib/neutron_plugins/ovs_base
+++ b/lib/neutron_plugins/ovs_base
@@ -80,19 +80,6 @@
         elif is_fedora; then
             restart_service openvswitch
             sudo systemctl enable openvswitch
-        elif is_suse; then
-            if [[ $DISTRO == "sle12" ]] && vercmp "$os_RELEASE" "<" "12.2" ; then
-                restart_service openvswitch-switch
-            else
-                # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1085971
-                if [[ $DISTRO =~ "tumbleweed" ]]; then
-                    sudo sed -i -e "s,^OVS_USER_ID=.*,OVS_USER_ID='root:root'," /etc/sysconfig/openvswitch
-                fi
-                restart_service openvswitch || {
-                    journalctl -xe || :
-                    systemctl status openvswitch
-                }
-            fi
         fi
     fi
 }
diff --git a/lib/neutron_plugins/ovs_source b/lib/neutron_plugins/ovs_source
index 9ae5555..75e7d7c 100644
--- a/lib/neutron_plugins/ovs_source
+++ b/lib/neutron_plugins/ovs_source
@@ -33,9 +33,9 @@
     local fatal=$2
 
     if [ "$(trueorfalse True fatal)" == "True" ]; then
-        sudo modprobe $module || (dmesg && die $LINENO "FAILED TO LOAD $module")
+        sudo modprobe $module || (sudo dmesg && die $LINENO "FAILED TO LOAD $module")
     else
-        sudo modprobe $module || (echo "FAILED TO LOAD $module" && dmesg)
+        sudo modprobe $module || (echo "FAILED TO LOAD $module" && sudo dmesg)
     fi
 }
 
@@ -87,9 +87,15 @@
 
         install_package kernel-devel-$KERNEL_VERSION
         install_package kernel-headers-$KERNEL_VERSION
+        if is_service_enabled tls-proxy; then
+            install_package openssl-devel
+        fi
 
     elif is_ubuntu ; then
         install_package linux-headers-$KERNEL_VERSION
+        if is_service_enabled tls-proxy; then
+            install_package libssl-dev
+        fi
     fi
 }
 
@@ -97,7 +103,7 @@
 function load_ovs_kernel_modules {
     load_module openvswitch
     load_module vport-geneve False
-    dmesg | tail
+    sudo dmesg | tail
 }
 
 # reload_ovs_kernel_modules() - reload openvswitch kernel module
@@ -158,10 +164,8 @@
     sudo make install
     if [[ "$build_modules" == "True" ]]; then
         sudo make INSTALL_MOD_DIR=kernel/net/openvswitch modules_install
-        reload_ovs_kernel_modules
-    else
-        load_ovs_kernel_modules
     fi
+    reload_ovs_kernel_modules
 
     cd $_pwd
 }
@@ -176,12 +180,6 @@
         ${action}_service openvswitch-switch
     elif is_fedora; then
         ${action}_service openvswitch
-    elif is_suse; then
-        if [[ $DISTRO == "sle12" ]] && [[ $os_RELEASE -lt 12.2 ]]; then
-            ${action}_service openvswitch-switch
-        else
-            ${action}_service openvswitch
-        fi
     fi
 }
 
diff --git a/lib/neutron_plugins/services/l3 b/lib/neutron_plugins/services/l3
index c0d74c7..c6d4663 100644
--- a/lib/neutron_plugins/services/l3
+++ b/lib/neutron_plugins/services/l3
@@ -47,7 +47,8 @@
 # used for the network.  In case of ofagent, you should add the
 # corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS.
 # For openvswitch agent, you should add the corresponding entry to
-# your OVS_BRIDGE_MAPPINGS.
+# your OVS_BRIDGE_MAPPINGS; for OVN, add the corresponding entry
+# to your OVN_BRIDGE_MAPPINGS.
 #
 # eg.  (ofagent)
 #    Q_USE_PROVIDERNET_FOR_PUBLIC=True
@@ -60,6 +61,11 @@
 #    PUBLIC_PHYSICAL_NETWORK=public
 #    OVS_BRIDGE_MAPPINGS=public:br-ex
 #
+# eg.  (ovn agent)
+#    Q_USE_PROVIDERNET_FOR_PUBLIC=True
+#    PUBLIC_PHYSICAL_NETWORK=public
+#    OVN_BRIDGE_MAPPINGS=public:br-ex
+#
 # The provider-network-type defaults to flat, however, the values
 # PUBLIC_PROVIDERNET_TYPE and PUBLIC_PROVIDERNET_SEGMENTATION_ID could
 # be set to specify the parameters for an alternate network type.
@@ -166,14 +172,14 @@
     if is_provider_network; then
         die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK"
         die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE"
-        NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" network create $PHYSICAL_NETWORK --provider-network-type $PROVIDER_NETWORK_TYPE --provider-physical-network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider-segment $SEGMENTATION_ID} --share -f value -c id)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
             if [ -z $SUBNETPOOL_V4_ID ]; then
                 fixed_range_v4=$FIXED_RANGE
             fi
-            SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} | grep ' id ' | get_field 2)
+            SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY ${SUBNETPOOL_V4_ID:+--subnet-pool $SUBNETPOOL_V4_ID} --network $NET_ID ${fixed_range_v4:+--subnet-range $fixed_range_v4} -f value -c id)
             die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME"
         fi
 
@@ -183,7 +189,7 @@
             if [ -z $SUBNETPOOL_V6_ID ]; then
                 fixed_range_v6=$IPV6_PROVIDER_FIXED_RANGE
             fi
-            IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} | grep ' id ' | get_field 2)
+            IPV6_SUBNET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" subnet create --ip-version 6 --gateway $IPV6_PROVIDER_NETWORK_GATEWAY $IPV6_PROVIDER_SUBNET_NAME ${SUBNETPOOL_V6_ID:+--subnet-pool $SUBNETPOOL_V6_ID} --network $NET_ID ${fixed_range_v6:+--subnet-range $fixed_range_v6} -f value -c id)
             die_if_not_set $LINENO IPV6_SUBNET_ID "Failure creating IPV6_SUBNET_ID for $IPV6_PROVIDER_SUBNET_NAME"
         fi
 
@@ -193,7 +199,7 @@
             sudo ip link set $PUBLIC_INTERFACE up
         fi
     else
-        NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2)
+        NET_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" network create "$PRIVATE_NETWORK_NAME" -f value -c id)
         die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME"
 
         if [[ "$IP_VERSION" =~ 4.* ]]; then
@@ -211,11 +217,11 @@
         # Create a router, and add the private subnet as one of its interfaces
         if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then
             # create a tenant-owned router.
-            ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id)
             die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
         else
             # Plugin only supports creating a single router, which should be admin owned.
-            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME | grep ' id ' | get_field 2)
+            ROUTER_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" router create $Q_ROUTER_NAME -f value -c id)
             die_if_not_set $LINENO ROUTER_ID "Failure creating router $Q_ROUTER_NAME"
         fi
 
@@ -225,9 +231,9 @@
         fi
         # Create an external network, and a subnet. Configure the external network as router gw
         if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then
-            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS --provider-network-type ${PUBLIC_PROVIDERNET_TYPE:-flat} ${PUBLIC_PROVIDERNET_SEGMENTATION_ID:+--provider-segment $PUBLIC_PROVIDERNET_SEGMENTATION_ID} --provider-physical-network ${PUBLIC_PHYSICAL_NETWORK} -f value -c id)
         else
-            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS | grep ' id ' | get_field 2)
+            EXT_NET_ID=$(openstack --os-cloud devstack-admin --os-region "$REGION_NAME" network create "$PUBLIC_NETWORK_NAME" $EXTERNAL_NETWORK_FLAGS -f value -c id)
         fi
         die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME"
 
@@ -257,7 +263,7 @@
     subnet_params+="${fixed_range_v4:+--subnet-range $fixed_range_v4} "
     subnet_params+="--network $NET_ID $PRIVATE_SUBNET_NAME"
     local subnet_id
-    subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+    subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id)
     die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet"
     echo $subnet_id
 }
@@ -278,7 +284,7 @@
     subnet_params+="${fixed_range_v6:+--subnet-range $fixed_range_v6} "
     subnet_params+="$ipv6_modes --network $NET_ID $IPV6_PRIVATE_SUBNET_NAME "
     local ipv6_subnet_id
-    ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params | grep ' id ' | get_field 2)
+    ipv6_subnet_id=$(openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" subnet create $subnet_params -f value -c id)
     die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet"
     echo $ipv6_subnet_id
 }
@@ -323,7 +329,7 @@
     openstack --os-cloud devstack-admin-demo --os-region "$REGION_NAME" router set --external-gateway $EXT_NET_ID $ROUTER_ID
 
     # This logic is specific to using OVN or the l3-agent for layer 3
-    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
         # Configure and enable public bridge
         local ext_gw_interface="none"
         if is_neutron_ovs_base_plugin; then
@@ -372,7 +378,7 @@
     fi
 
     # This logic is specific to using OVN or the l3-agent for layer 3
-    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-server) || is_service_enabled q-l3 neutron-l3; then
+    if ([[ $Q_AGENT == "ovn" ]] && [[ "$OVN_L3_CREATE_PUBLIC_NETWORK" == "True" ]] && is_service_enabled q-svc neutron-api) || is_service_enabled q-l3 neutron-l3; then
         # if the Linux host considers itself to be a router then it will
         # ignore all router advertisements
         # Ensure IPv6 RAs are accepted on interfaces with a default route.
@@ -403,7 +409,10 @@
             ext_gw_interface=$(_neutron_get_ext_gw_interface)
             local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/}
 
-            # Configure interface for public bridge
+            # Configure the interface for the public bridge by setting it
+            # "up", in case the job is running entirely private-network-based
+            # testing.
+            sudo ip link set $ext_gw_interface up
             sudo ip -6 addr replace $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface
             # Any IPv6 private subnet that uses the default IPV6 subnet pool
             # and that is plugged into the default router (Q_ROUTER_NAME) will
diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering
index 5b32468..757a562 100644
--- a/lib/neutron_plugins/services/metering
+++ b/lib/neutron_plugins/services/metering
@@ -12,7 +12,7 @@
 METERING_PLUGIN="neutron.services.metering.metering_plugin.MeteringPlugin"
 
 function neutron_agent_metering_configure_common {
-    _neutron_service_plugin_class_add $METERING_PLUGIN
+    neutron_service_plugin_class_add $METERING_PLUGIN
 }
 
 function neutron_agent_metering_configure_agent {
diff --git a/lib/neutron_plugins/services/qos b/lib/neutron_plugins/services/qos
index af9eb3d..c11c315 100644
--- a/lib/neutron_plugins/services/qos
+++ b/lib/neutron_plugins/services/qos
@@ -6,7 +6,7 @@
 
 
 function configure_qos_core_plugin {
-    configure_qos_$NEUTRON_CORE_PLUGIN
+    configure_qos_$Q_PLUGIN
 }
 
 
diff --git a/lib/neutron_plugins/services/segments b/lib/neutron_plugins/services/segments
new file mode 100644
index 0000000..08936ba
--- /dev/null
+++ b/lib/neutron_plugins/services/segments
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+function configure_segments_service_plugin {
+    neutron_service_plugin_class_add segments
+}
+
+function configure_segments_extension {
+    configure_segments_service_plugin
+}
+
diff --git a/lib/nova b/lib/nova
index 4c14374..35c6893 100644
--- a/lib/nova
+++ b/lib/nova
@@ -53,11 +53,19 @@
 NOVA_CPU_CONF=$NOVA_CONF_DIR/nova-cpu.conf
 NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf
 NOVA_API_DB=${NOVA_API_DB:-nova_api}
-NOVA_UWSGI=$NOVA_BIN_DIR/nova-api-wsgi
-NOVA_METADATA_UWSGI=$NOVA_BIN_DIR/nova-metadata-wsgi
+NOVA_UWSGI=nova.wsgi.osapi_compute:application
+NOVA_METADATA_UWSGI=nova.wsgi.metadata:application
 NOVA_UWSGI_CONF=$NOVA_CONF_DIR/nova-api-uwsgi.ini
 NOVA_METADATA_UWSGI_CONF=$NOVA_CONF_DIR/nova-metadata-uwsgi.ini
 
+# Allow forcing the stable compute uuid to something specific. This would be
+# done by deployment tools that pre-allocate the UUIDs, but it is also handy
+# for developers who need to re-stack a compute-only deployment multiple
+# times. Since the DB is non-local and not erased on an unstack, keeping the
+# UUID the same each time is what developers want. Set it to a uuid here or
+# leave it blank for default allocate-on-start behavior.
+NOVA_CPU_UUID=""
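+# e.g. a developer who wants a stable compute UUID across re-stacks might put
+# something like NOVA_CPU_UUID=$(uuidgen) in local.conf (illustrative only).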
+
 # The total number of cells we expect. Must be greater than one and doesn't
 # count cell0.
 NOVA_NUM_CELLS=${NOVA_NUM_CELLS:-1}
@@ -75,6 +83,11 @@
 # mean "use uwsgi" because we'll be always using uwsgi.
 NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-True}
 
+# We do not need to report service status every 10s for devstack-like
+# deployments. In the gate this generates extra work for the services and the
+# database, which are already taxed.
+NOVA_SERVICE_REPORT_INTERVAL=${NOVA_SERVICE_REPORT_INTERVAL:-120}
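+# With the 120s default above, service_down_time is set to six times the
+# report interval later in this file, i.e. 720 seconds.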
+
 if is_service_enabled tls-proxy; then
     NOVA_SERVICE_PROTOCOL="https"
 fi
@@ -97,30 +110,28 @@
 METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775}
 NOVA_ENABLE_CACHE=${NOVA_ENABLE_CACHE:-True}
 
+# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
+# This can be used to disable the compute API policy scope and new defaults.
+# By default, it is True.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+NOVA_ENFORCE_SCOPE=$(trueorfalse True NOVA_ENFORCE_SCOPE)
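+# e.g. setting NOVA_ENFORCE_SCOPE=False in local.conf (with ENFORCE_SCOPE not
+# set to True) keeps the legacy, unscoped compute API policy defaults.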
+
+if [[ $SERVICE_IP_VERSION == 6 ]]; then
+    NOVA_MY_IP="$HOST_IPV6"
+else
+    NOVA_MY_IP="$HOST_IP"
+fi
+
 # Option to enable/disable config drive
 # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive
 FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"False"}
 
 # The following NOVA_FILTERS contains SameHostFilter and DifferentHostFilter with
 # the default filters.
-NOVA_FILTERS="AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
+NOVA_FILTERS="ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,SameHostFilter,DifferentHostFilter"
 
 QEMU_CONF=/etc/libvirt/qemu.conf
 
-# Set default defaults here as some hypervisor drivers override these
-PUBLIC_INTERFACE_DEFAULT=br100
-# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that
-# the default isn't completely crazy. This will match ``eth*``, ``em*``, or
-# the new ``p*`` interfaces, then basically picks the first
-# alphabetically. It's probably wrong, however it's less wrong than
-# always using ``eth0`` which doesn't exist on new Linux distros at all.
-GUEST_INTERFACE_DEFAULT=$(ip link \
-    | grep 'state UP' \
-    | awk '{print $2}' \
-    | sed 's/://' \
-    | grep ^[ep] \
-    | head -1)
-
 # ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration.
 # In multi-node setups allows compute hosts to not run ``n-novnc``.
 NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED)
@@ -162,6 +173,9 @@
 # Whether to use Keystone unified limits instead of legacy quota limits.
 NOVA_USE_UNIFIED_LIMITS=$(trueorfalse False NOVA_USE_UNIFIED_LIMITS)
 
+# TB Cache Size in MiB for qemu guests
+NOVA_LIBVIRT_TB_CACHE_SIZE=${NOVA_LIBVIRT_TB_CACHE_SIZE:-0}
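+# e.g. NOVA_LIBVIRT_TB_CACHE_SIZE=128 in local.conf (an illustrative value)
+# would have nova-compute set libvirt's tb_cache_size to 128 MiB; the default
+# of 0 leaves the setting untouched.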
+
 # Functions
 # ---------
 
@@ -219,6 +233,9 @@
         done
         sudo iscsiadm --mode node --op delete || true
 
+        # Disconnect all nvmeof connections
+        sudo nvme disconnect-all || true
+
         # Clean out the instances directory.
         sudo rm -rf $NOVA_INSTANCES_PATH/*
     fi
@@ -234,8 +251,8 @@
 
     stop_process "n-api"
     stop_process "n-api-meta"
-    remove_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI"
-    remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI"
+    remove_uwsgi_config "$NOVA_UWSGI_CONF" "nova-api"
+    remove_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "nova-metadata"
 
     if [[ "$NOVA_BACKEND" == "LVM" ]]; then
         clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME
@@ -306,6 +323,7 @@
             fi
         fi
 
+        # Due to cinder bug #1966513 we ALWAYS need an initiator name for LVM
         # Ensure each compute host uses a unique iSCSI initiator
         echo InitiatorName=$(iscsi-iname) | sudo tee /etc/iscsi/initiatorname.iscsi
 
@@ -324,14 +342,30 @@
 
         # set chap algorithms.  The default chap_algorithm is md5 which will
         # not work under FIPS.
-        # FIXME(alee) For some reason, this breaks openeuler.  Openeuler devs should weigh in
-        # and determine the correct solution for openeuler here
-        if ! is_openeuler; then
-            iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
-        fi
+        iniset -sudo /etc/iscsi/iscsid.conf DEFAULT "node.session.auth.chap_algs" "SHA3-256,SHA256"
 
-        # ensure that iscsid is started, even when disabled by default
-        restart_service iscsid
+        if [[ $CINDER_TARGET_HELPER != 'nvmet' ]]; then
+            # ensure that iscsid is started, even when disabled by default
+            restart_service iscsid
+
+        # For NVMe-oF we need different packages that may not be present
+        else
+            install_package nvme-cli
+            sudo modprobe nvme-fabrics
+
+            # Ensure NVMe is ready and create the Soft-RoCE device over the networking interface
+            if [[ $CINDER_TARGET_PROTOCOL == 'nvmet_rdma' ]]; then
+                sudo modprobe nvme-rdma
+                iface=${HOST_IP_IFACE:-`ip -br -$SERVICE_IP_VERSION a | grep $NOVA_MY_IP | awk '{print $1}'`}
+                if ! sudo rdma link | grep $iface ; then
+                    sudo rdma link add rxe_$iface type rxe netdev $iface
+                fi
+            elif [[ $CINDER_TARGET_PROTOCOL == 'nvmet_tcp' ]]; then
+                sudo modprobe nvme-tcp
+            else  # 'nvmet_fc'
+                sudo modprobe nvme-fc
+            fi
+        fi
     fi
 
     # Rebuild the config file from scratch
@@ -422,11 +456,7 @@
     iniset $NOVA_CONF filter_scheduler enabled_filters "$NOVA_FILTERS"
     iniset $NOVA_CONF scheduler workers "$API_WORKERS"
     iniset $NOVA_CONF neutron default_floating_pool "$PUBLIC_NETWORK_NAME"
-    if [[ $SERVICE_IP_VERSION == 6 ]]; then
-        iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6"
-    else
-        iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP"
-    fi
+    iniset $NOVA_CONF DEFAULT my_ip "$NOVA_MY_IP"
     iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x"
     iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
     iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS"
@@ -434,8 +464,11 @@
 
     iniset $NOVA_CONF key_manager backend nova.keymgr.conf_key_mgr.ConfKeyManager
 
-    if is_fedora || is_suse; then
-        # nova defaults to /usr/local/bin, but fedora and suse pip like to
+    iniset $NOVA_CONF DEFAULT report_interval $NOVA_SERVICE_REPORT_INTERVAL
+    iniset $NOVA_CONF DEFAULT service_down_time $(($NOVA_SERVICE_REPORT_INTERVAL * 6))
+
+    if is_fedora; then
+        # nova defaults to /usr/local/bin, but Fedora pip likes to
         # install things in /usr/bin
         iniset $NOVA_CONF DEFAULT bindir "/usr/bin"
     fi
@@ -473,6 +506,13 @@
             NOVA_ENABLED_APIS=$(echo $NOVA_ENABLED_APIS | sed "s/,metadata//")
         fi
         iniset $NOVA_CONF DEFAULT enabled_apis "$NOVA_ENABLED_APIS"
+        if [[ "$NOVA_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+            iniset $NOVA_CONF oslo_policy enforce_new_defaults True
+            iniset $NOVA_CONF oslo_policy enforce_scope True
+        else
+            iniset $NOVA_CONF oslo_policy enforce_new_defaults False
+            iniset $NOVA_CONF oslo_policy enforce_scope False
+        fi
         if is_service_enabled tls-proxy && [ "$NOVA_USE_MOD_WSGI" == "False" ]; then
             # Set the service port for a proxy to take the original
             iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT"
@@ -502,7 +542,7 @@
 
     # nova defaults to genisoimage but only mkisofs is available for 15.0+
     # rhel provides mkisofs symlink to genisoimage or xorriso appropriately
-    if is_suse || is_fedora; then
+    if is_fedora; then
         iniset $NOVA_CONF DEFAULT mkisofs_cmd /usr/bin/mkisofs
     fi
 
@@ -512,11 +552,11 @@
     iniset $NOVA_CONF upgrade_levels compute "auto"
 
     if is_service_enabled n-api; then
-        write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute"
+        write_uwsgi_config "$NOVA_UWSGI_CONF" "$NOVA_UWSGI" "/compute" "" "nova-api"
     fi
 
     if is_service_enabled n-api-meta; then
-        write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}"
+        write_uwsgi_config "$NOVA_METADATA_UWSGI_CONF" "$NOVA_METADATA_UWSGI" "" "$SERVICE_LISTEN_ADDRESS:${METADATA_SERVICE_PORT}" "nova-metadata"
     fi
 
     if is_service_enabled ceilometer; then
@@ -600,7 +640,7 @@
     local conf=${1:-$NOVA_CONF}
     iniset $conf placement auth_type "password"
     iniset $conf placement auth_url "$KEYSTONE_SERVICE_URI"
-    iniset $conf placement username placement
+    iniset $conf placement username nova
     iniset $conf placement password "$SERVICE_PASSWORD"
     iniset $conf placement user_domain_name "$SERVICE_DOMAIN_NAME"
     iniset $conf placement project_name "$SERVICE_TENANT_NAME"
@@ -903,8 +943,23 @@
         # a websockets/html5 or flash powered VNC console for vm instances
         NOVNC_FROM_PACKAGE=$(trueorfalse False NOVNC_FROM_PACKAGE)
         if [ "$NOVNC_FROM_PACKAGE" = "True" ]; then
+            # Installing novnc on Debian bullseye breaks the global pip
+            # install. This happens because novnc pulls in distro cryptography,
+            # which will be preferred by distro pip, but anything that has
+            # installed pyOpenSSL from PyPI (keystone) is not compatible
+            # with distro cryptography. Fix this by installing
+            # python3-openssl (pyOpenSSL) from the distro, which pip will
+            # prefer on Debian. Ubuntu has the inverse problem, so we only do
+            # this for Debian.
+            local novnc_packages
+            novnc_packages="novnc"
+            GetOSVersion
+            if [[ "$os_VENDOR" = "Debian" ]] ; then
+                novnc_packages="$novnc_packages python3-openssl"
+            fi
+
             NOVNC_WEB_DIR=/usr/share/novnc
-            install_package novnc
+            install_package $novnc_packages
         else
             NOVNC_WEB_DIR=$DEST/novnc
             git_clone $NOVNC_REPO $NOVNC_WEB_DIR $NOVNC_BRANCH
@@ -1000,6 +1055,10 @@
     # by the compute process.
     configure_console_compute
 
+    # Set a longer rebuild timeout for BFV instances because we likely have
+    # slower disks than expected. The default is 20s/GB.
+    iniset $NOVA_CPU_CONF DEFAULT reimage_timeout_per_gb 180
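+    # As a rough illustration, at 180s/GB a 4 GiB image gets a ~12 minute
+    # rebuild window, versus ~80 seconds with the 20s/GB default.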
+
     # Configure the OVSDB connection for os-vif
     if [ -n "$OVSDB_SERVER_LOCAL_HOST" ]; then
         iniset $NOVA_CPU_CONF os_vif_ovs ovsdb_connection "tcp:$OVSDB_SERVER_LOCAL_HOST:6640"
@@ -1010,7 +1069,14 @@
         iniset $NOVA_CPU_CONF workarounds libvirt_disable_apic True
     fi
 
+    if [[ "$NOVA_CPU_UUID" ]]; then
+        echo -n $NOVA_CPU_UUID > $NOVA_CONF_DIR/compute_id
+    fi
+
     if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then
+        if [ ${NOVA_LIBVIRT_TB_CACHE_SIZE} -gt 0 ]; then
+            iniset $NOVA_CPU_CONF libvirt tb_cache_size ${NOVA_LIBVIRT_TB_CACHE_SIZE}
+        fi
         # The group **$LIBVIRT_GROUP** is added to the current user in this script.
         # ``sg`` is used in run_process to execute nova-compute as a member of the
         # **$LIBVIRT_GROUP** group.
diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt
index 3e7d280..ba2e98e 100644
--- a/lib/nova_plugins/functions-libvirt
+++ b/lib/nova_plugins/functions-libvirt
@@ -69,12 +69,12 @@
             $REQUIREMENTS_DIR/upper-constraints.txt -- libvirt-python
 
     if is_ubuntu; then
-        install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt
+        install_package qemu-system libvirt-clients libvirt-daemon-system libvirt-dev python3-libvirt systemd-coredump
         if is_arch "aarch64"; then
             install_package qemu-efi
         fi
         #pip_install_gr <there-si-no-guestfs-in-pypi>
-    elif is_fedora || is_suse; then
+    elif is_fedora; then
 
         # Optionally enable the virt-preview repo when on Fedora
         if [[ $DISTRO =~ f[0-9][0-9] ]] && [[ ${ENABLE_FEDORA_VIRT_PREVIEW_REPO} == "True" ]]; then
@@ -82,11 +82,17 @@
             sudo dnf copr enable -y @virtmaint-sig/virt-preview
         fi
 
+        if is_openeuler; then
+            qemu_package=qemu
+        else
+            qemu_package=qemu-kvm
+        fi
+
         # Note that in CentOS/RHEL this needs to come from the RDO
         # repositories (qemu-kvm-ev ... which provides this package)
         # as the base system version is too old.  We should have
         # pre-installed these
-        install_package qemu-kvm
+        install_package $qemu_package
         install_package libvirt libvirt-devel python3-libvirt
 
         if is_arch "aarch64"; then
@@ -115,8 +121,8 @@
 EOF
     fi
 
-    if is_fedora || is_suse; then
-        # Starting with fedora 18 and opensuse-12.3 enable stack-user to
+    if is_fedora; then
+        # Starting with Fedora 18, enable stack-user to
         # virsh -c qemu:///system by creating a policy-kit rule for
         # stack-user using the new Javascript syntax
         rules_dir=/etc/polkit-1/rules.d
diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic
index f058e9b..9a39c79 100644
--- a/lib/nova_plugins/hypervisor-ironic
+++ b/lib/nova_plugins/hypervisor-ironic
@@ -53,6 +53,10 @@
         iniset $NOVA_CONF ironic project_domain_id default
         iniset $NOVA_CONF ironic project_name demo
     fi
+    if is_ironic_sharded; then
+        iniset $NOVA_CONF ironic shard $IRONIC_SHARD_1_NAME
+    fi
+
     iniset $NOVA_CONF ironic user_domain_id default
     iniset $NOVA_CONF ironic region_name $REGION_NAME
 
diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt
index c1cd132..4b44c1f 100644
--- a/lib/nova_plugins/hypervisor-libvirt
+++ b/lib/nova_plugins/hypervisor-libvirt
@@ -56,6 +56,10 @@
     # arm64-specific configuration
     if is_arch "aarch64"; then
         iniset $NOVA_CONF libvirt cpu_mode "host-passthrough"
+        # NOTE(mnaser): We cannot have `cpu_model` set if the `cpu_mode` is
+        #               set to `host-passthrough`, or `nova-compute` refuses to
+        #               start.
+        inidelete $NOVA_CONF libvirt cpu_model
     fi
 
     if isset ENABLE_FILE_INJECTION; then
@@ -114,9 +118,6 @@
                     sudo dpkg-statoverride --add --update $STAT_OVERRIDE
                 fi
             done
-        elif is_suse; then
-            # Workaround for missing dependencies in python-libguestfs
-            install_package python-libguestfs guestfs-data augeas augeas-lenses
         elif is_fedora; then
             install_package python3-libguestfs
         fi
diff --git a/lib/os-vif b/lib/os-vif
index 865645c..7c8bee3 100644
--- a/lib/os-vif
+++ b/lib/os-vif
@@ -1,10 +1,5 @@
 #!/bin/bash
 
-# support vsctl or native.
-# until bug #1929446 is resolved we override the os-vif default
-# and fall back to the legacy "vsctl" driver.
-OS_VIF_OVS_OVSDB_INTERFACE=${OS_VIF_OVS_OVSDB_INTERFACE:="vsctl"}
-
 function is_ml2_ovs {
     if [[ "${Q_AGENT}" == "openvswitch" ]]; then
         echo "True"
@@ -19,11 +14,9 @@
 
 function configure_os_vif {
     if [[ -e ${NOVA_CONF} ]]; then
-        iniset ${NOVA_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
         iniset ${NOVA_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
     fi
     if [[ -e ${NEUTRON_CONF} ]]; then
-        iniset ${NEUTRON_CONF} os_vif_ovs ovsdb_interface ${OS_VIF_OVS_OVSDB_INTERFACE}
         iniset ${NEUTRON_CONF} os_vif_ovs isolate_vif ${OS_VIF_OVS_ISOLATE_VIF}
     fi
 }
diff --git a/lib/placement b/lib/placement
index b779866..63fdfb6 100644
--- a/lib/placement
+++ b/lib/placement
@@ -48,6 +48,12 @@
 PLACEMENT_SERVICE_PROTOCOL=${PLACEMENT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL}
 PLACEMENT_SERVICE_HOST=${PLACEMENT_SERVICE_HOST:-$SERVICE_HOST}
 
+# Flag to set the oslo_policy.enforce_scope and oslo_policy.enforce_new_defaults.
+# This is used to switch the Placement API policy scope and new defaults.
+# By default, this flag is False.
+# For more detail: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope
+PLACEMENT_ENFORCE_SCOPE=$(trueorfalse False PLACEMENT_ENFORCE_SCOPE)
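+# e.g. set PLACEMENT_ENFORCE_SCOPE=True (or ENFORCE_SCOPE=True for all
+# services) in local.conf to exercise the new scoped policy defaults.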
+
 # Functions
 # ---------
 
@@ -62,7 +68,7 @@
 # runs that a clean run would need to clean up
 function cleanup_placement {
     sudo rm -f $(apache_site_config_for placement-api)
-    remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "$PLACEMENT_UWSGI"
+    remove_uwsgi_config "$PLACEMENT_UWSGI_CONF" "placement-api"
 }
 
 # _config_placement_apache_wsgi() - Set WSGI config files
@@ -111,6 +117,13 @@
     else
         _config_placement_apache_wsgi
     fi
+    if [[ "$PLACEMENT_ENFORCE_SCOPE" == "True" || "$ENFORCE_SCOPE" == "True" ]]; then
+        iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults True
+        iniset $PLACEMENT_CONF oslo_policy enforce_scope True
+    else
+        iniset $PLACEMENT_CONF oslo_policy enforce_new_defaults False
+        iniset $PLACEMENT_CONF oslo_policy enforce_scope False
+    fi
 }
 
 # create_placement_accounts() - Set up required placement accounts
diff --git a/lib/rpc_backend b/lib/rpc_backend
index 743b4ae..bbb4149 100644
--- a/lib/rpc_backend
+++ b/lib/rpc_backend
@@ -52,20 +52,7 @@
     if is_service_enabled rabbit; then
         # Install rabbitmq-server
         install_package rabbitmq-server
-        if is_suse; then
-            install_package rabbitmq-server-plugins
-            # the default systemd socket activation only listens on the loopback interface
-            # which causes rabbitmq to try to start its own epmd
-            sudo mkdir -p /etc/systemd/system/epmd.socket.d
-            cat <<EOF | sudo tee /etc/systemd/system/epmd.socket.d/ports.conf >/dev/null
-[Socket]
-ListenStream=
-ListenStream=[::]:4369
-EOF
-            sudo systemctl daemon-reload
-            sudo systemctl restart epmd.socket epmd.service
-        fi
-        if is_fedora || is_suse; then
+        if is_fedora; then
             # NOTE(jangutter): If rabbitmq is not running (as in a fresh
             # install) then rabbit_setuser triggers epmd@0.0.0.0.socket with
             # socket activation. This fails the first time and does not get
diff --git a/lib/swift b/lib/swift
index ba92f3d..1ebf073 100644
--- a/lib/swift
+++ b/lib/swift
@@ -402,6 +402,11 @@
     # Versioned Writes
     iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:versioned_writes allow_versioned_writes true
 
+    # Temporarily add sha1: https://storyboard.openstack.org/#!/story/2010068
+    if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]]; then
+        iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:tempurl allowed_digests "sha1 sha256 sha512"
+    fi
+
     # Configure Ceilometer
     if is_service_enabled ceilometer; then
         iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN"
@@ -542,9 +547,6 @@
     local swift_log_dir=${SWIFT_DATA_DIR}/logs
     sudo rm -rf ${swift_log_dir}
     local swift_log_group=adm
-    if is_suse; then
-        swift_log_group=root
-    fi
     sudo install -d -o ${STACK_USER} -g ${swift_log_group} ${swift_log_dir}/hourly
 
     if [[ $SYSLOG != "False" ]]; then
diff --git a/lib/tempest b/lib/tempest
index 4504663..310db2d 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -18,7 +18,7 @@
 #   - ``PUBLIC_NETWORK_NAME``
 #   - ``VIRT_DRIVER``
 #   - ``LIBVIRT_TYPE``
-#   - ``KEYSTONE_SERVICE_URI``, ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone
+#   - ``KEYSTONE_SERVICE_URI_V3`` from lib/keystone
 #
 # Optional Dependencies:
 #
@@ -29,6 +29,7 @@
 # - ``DEFAULT_INSTANCE_USER``
 # - ``DEFAULT_INSTANCE_ALT_USER``
 # - ``CINDER_ENABLED_BACKENDS``
+# - ``CINDER_BACKUP_DRIVER``
 # - ``NOVA_ALLOW_DUPLICATE_NETWORKS``
 #
 # ``stack.sh`` calls the entry points in this order:
@@ -71,6 +72,17 @@
 TEMPEST_DEFAULT_STORAGE_PROTOCOL="iSCSI"
 TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PROTOCOL}
 
+# Glance/Image variables
+# When Glance image import is enabled, image creation is asynchronous and images
+# may not yet be active when tempest looks for them.  In that case, we poll
+# Glance every TEMPEST_GLANCE_IMPORT_POLL_INTERVAL seconds for the number of
+# times specified by TEMPEST_GLANCE_IMPORT_POLL_LIMIT.  If you are importing
+# multiple images, set TEMPEST_GLANCE_IMAGE_COUNT so the poller does not quit
+# too early (though it will not exceed the polling limit).
+TEMPEST_GLANCE_IMPORT_POLL_INTERVAL=${TEMPEST_GLANCE_IMPORT_POLL_INTERVAL:-1}
+TEMPEST_GLANCE_IMPORT_POLL_LIMIT=${TEMPEST_GLANCE_IMPORT_POLL_LIMIT:-12}
+TEMPEST_GLANCE_IMAGE_COUNT=${TEMPEST_GLANCE_IMAGE_COUNT:-1}
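+# With the defaults above, the poller waits at most
+# TEMPEST_GLANCE_IMPORT_POLL_LIMIT * TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+# (12 * 1 = 12) seconds for TEMPEST_GLANCE_IMAGE_COUNT active images to appear.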
+
 # Neutron/Network variables
 IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
 IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
@@ -90,6 +102,9 @@
 # it will run tempest with
 TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
 
+TEMPEST_FLAVOR_RAM=${TEMPEST_FLAVOR_RAM:-192}
+TEMPEST_FLAVOR_ALT_RAM=${TEMPEST_FLAVOR_ALT_RAM:-256}
+
 # Functions
 # ---------
 
@@ -117,6 +132,13 @@
         (cd $REQUIREMENTS_DIR &&
             git show master:upper-constraints.txt 2>/dev/null ||
             git show origin/master:upper-constraints.txt) > $tmp_c
+        # NOTE(gmann): we need to set the below env var pointing to master
+        # constraints even though that is the default in tox.ini. Otherwise it
+        # can create issues for grenade runs where old and new devstack can
+        # have different tempest versions (old and master) to install. For
+        # details on the problem, refer to
+        # https://bugs.launchpad.net/devstack/+bug/2003993
+        export UPPER_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
+        export TOX_CONSTRAINTS_FILE=https://releases.openstack.org/constraints/upper/master
     else
         echo "Using $TEMPEST_VENV_UPPER_CONSTRAINTS constraints in Tempest virtual env."
         cat $TEMPEST_VENV_UPPER_CONSTRAINTS > $tmp_c
@@ -127,6 +149,45 @@
     fi
 }
 
+# Makes a call to glance to get a list of active images, ignoring
+# ramdisk and kernel images.  Takes two nameref arguments, an array and a
+# variable.  The array will contain the list of active image UUIDs;
+# if an image with ``DEFAULT_IMAGE_NAME`` is found, its UUID will be
+# set as the value of the img_id ($2) parameter.
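+# An illustrative call pattern (the names here are examples only):
+#   declare -a active_images
+#   default_image_id=""
+#   get_active_images active_images default_image_id
+#   echo "${#active_images[@]} active image(s); default: ${default_image_id:-none}"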
+function get_active_images {
+    declare -n img_array=$1
+    declare -n img_id=$2
+
+    # start with a fresh array in case we are called multiple times
+    img_array=()
+
+    while read -r IMAGE_NAME IMAGE_UUID; do
+        if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
+            img_id="$IMAGE_UUID"
+        fi
+        img_array+=($IMAGE_UUID)
+    done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+}
+
+function poll_glance_images {
+    declare -n image_array=$1
+    declare -n image_id=$2
+    local -i poll_count
+
+    poll_count=$TEMPEST_GLANCE_IMPORT_POLL_LIMIT
+    while (( poll_count-- > 0 )) ; do
+        sleep $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL
+        get_active_images image_array image_id
+        if (( ${#image_array[*]} >= $TEMPEST_GLANCE_IMAGE_COUNT )) ; then
+            return
+        fi
+    done
+    local msg
+    msg="Polling limit of $TEMPEST_GLANCE_IMPORT_POLL_LIMIT exceeded; "
+    msg+="poll interval was $TEMPEST_GLANCE_IMPORT_POLL_INTERVAL sec"
+    warn $LINENO "$msg"
+}
+
 # configure_tempest() - Set config files, create data dirs, etc
 function configure_tempest {
     if [[ "$INSTALL_TEMPEST" == "True" ]]; then
@@ -168,13 +229,21 @@
     declare -a images
 
     if is_service_enabled glance; then
-        while read -r IMAGE_NAME IMAGE_UUID; do
-            if [ "$IMAGE_NAME" = "$DEFAULT_IMAGE_NAME" ]; then
-                image_uuid="$IMAGE_UUID"
-                image_uuid_alt="$IMAGE_UUID"
+        get_active_images images image_uuid
+
+        if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+            # Glance image import is asynchronous and may be configured
+            # to do image conversion.  If image import is being used,
+            # it's possible that this code is being executed before the
+            # import has completed and there may be no active images yet.
+            if [[ "$GLANCE_USE_IMPORT_WORKFLOW" == "True" ]]; then
+                poll_glance_images images image_uuid
+                if (( ${#images[*]} < $TEMPEST_GLANCE_IMAGE_COUNT )); then
+                    echo "Only found ${#images[*]} image(s), was looking for $TEMPEST_GLANCE_IMAGE_COUNT"
+                    exit 1
+                fi
             fi
-            images+=($IMAGE_UUID)
-        done < <(openstack --os-cloud devstack-admin image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }')
+        fi
 
         case "${#images[*]}" in
             0)
@@ -184,13 +253,22 @@
             1)
                 if [ -z "$image_uuid" ]; then
                     image_uuid=${images[0]}
-                    image_uuid_alt=${images[0]}
                 fi
+                image_uuid_alt=$image_uuid
                 ;;
             *)
                 if [ -z "$image_uuid" ]; then
                     image_uuid=${images[0]}
-                    image_uuid_alt=${images[1]}
+                    if [ -z "$image_uuid_alt" ]; then
+                        image_uuid_alt=${images[1]}
+                    fi
+                elif [ -z "$image_uuid_alt" ]; then
+                    for image in ${images[@]}; do
+                        if [[ "$image" != "$image_uuid" ]]; then
+                            image_uuid_alt=$image
+                            break
+                        fi
+                    done
                 fi
                 ;;
         esac
@@ -220,13 +298,15 @@
             if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then
                 # Determine the flavor disk size based on the image size.
                 disk=$(image_size_in_gib $image_uuid)
-                openstack --os-cloud devstack-admin flavor create --id 42 --ram 128 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
+                ram=${TEMPEST_FLAVOR_RAM}
+                openstack --os-cloud devstack-admin flavor create --id 42 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.nano
             fi
             flavor_ref=42
             if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then
                 # Determine the alt flavor disk size based on the alt image size.
                 disk=$(image_size_in_gib $image_uuid_alt)
-                openstack --os-cloud devstack-admin flavor create --id 84 --ram 192 --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
+                ram=${TEMPEST_FLAVOR_ALT_RAM}
+                openstack --os-cloud devstack-admin flavor create --id 84 --ram ${ram} --disk $disk --vcpus 1 --property hw_rng:allowed=True m1.micro
             fi
             flavor_ref_alt=84
         else
@@ -277,6 +357,19 @@
         fi
     fi
 
+    if is_service_enabled glance; then
+        git_clone $OSTESTIMAGES_REPO $OSTESTIMAGES_DIR $OSTESTIMAGES_BRANCH
+        pushd $OSTESTIMAGES_DIR
+        tox -egenerate
+        popd
+        iniset $TEMPEST_CONFIG image images_manifest_file ${OSTESTIMAGES_DIR}/images/manifest.yaml
+        local image_conversion
+        image_conversion=$(iniget $GLANCE_IMAGE_IMPORT_CONF image_conversion output_format)
+        if [[ -n "$image_conversion" ]]; then
+            iniset $TEMPEST_CONFIG image-feature-enabled image_conversion True
+        fi
+    fi
+
     iniset $TEMPEST_CONFIG network project_network_cidr $FIXED_RANGE
 
     ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method}
@@ -306,7 +399,6 @@
     iniset $TEMPEST_CONFIG volume build_timeout $BUILD_TIMEOUT
 
     # Identity
-    iniset $TEMPEST_CONFIG identity uri "$KEYSTONE_SERVICE_URI/v2.0/"
     iniset $TEMPEST_CONFIG identity uri_v3 "$KEYSTONE_SERVICE_URI_V3"
     iniset $TEMPEST_CONFIG identity user_lockout_failure_attempts $KEYSTONE_LOCKOUT_FAILURE_ATTEMPTS
     iniset $TEMPEST_CONFIG identity user_lockout_duration $KEYSTONE_LOCKOUT_DURATION
@@ -317,19 +409,7 @@
         iniset $TEMPEST_CONFIG auth admin_project_name $admin_project_name
         iniset $TEMPEST_CONFIG auth admin_domain_name $admin_domain_name
     fi
-    if [ "$ENABLE_IDENTITY_V2" == "True" ]; then
-        # Run Identity API v2 tests ONLY if needed
-        iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 True
-    else
-        # Skip Identity API v2 tests by default
-        iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False
-    fi
     iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v3}
-    if [[ "$TEMPEST_AUTH_VERSION" != "v2" ]]; then
-        # we're going to disable v2 admin unless we're using v2 by default.
-        iniset $TEMPEST_CONFIG identity-feature-enabled api_v2_admin False
-    fi
-
     if is_service_enabled tls-proxy; then
         iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE
     fi
@@ -449,8 +529,19 @@
     # Scenario
     SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES}
     SCENARIO_IMAGE_FILE=$DEFAULT_IMAGE_FILE_NAME
+    SCENARIO_IMAGE_TYPE=${SCENARIO_IMAGE_TYPE:-cirros}
     iniset $TEMPEST_CONFIG scenario img_file $SCENARIO_IMAGE_DIR/$SCENARIO_IMAGE_FILE
 
+    # Since version 0.6.0 cirros uses the dhcpcd DHCP client by default; prior to
+    # version 0.6.0 it used udhcpc (the only available client at that time), which is also tempest's default.
+    if [[ "$SCENARIO_IMAGE_TYPE" == "cirros" ]]; then
+        # the image is a cirros image
+        # use the dhcpcd client when the version is greater than or equal to 0.6.0
+        if [[ $(echo $CIRROS_VERSION | tr -d '.') -ge 060 ]]; then
+            iniset $TEMPEST_CONFIG scenario dhcp_client dhcpcd
+        fi
+    fi
+
     # If using provider networking, use the physical network for validation rather than private
     TEMPEST_SSH_NETWORK_NAME=$PRIVATE_NETWORK_NAME
     if is_provider_network; then
@@ -486,6 +577,9 @@
         TEMPEST_VOLUME_REVERT_TO_SNAPSHOT=${TEMPEST_VOLUME_REVERT_TO_SNAPSHOT:-True}
     fi
     iniset $TEMPEST_CONFIG volume-feature-enabled volume_revert $(trueorfalse False TEMPEST_VOLUME_REVERT_TO_SNAPSHOT)
+    if [[ "$CINDER_BACKUP_DRIVER" == *"swift"* ]]; then
+        iniset $TEMPEST_CONFIG volume backup_driver swift
+    fi
     local tempest_volume_min_microversion=${TEMPEST_VOLUME_MIN_MICROVERSION:-None}
     local tempest_volume_max_microversion=${TEMPEST_VOLUME_MAX_MICROVERSION:-"latest"}
     if [ "$tempest_volume_min_microversion" == "None" ]; then
@@ -536,6 +630,10 @@
         iniset $TEMPEST_CONFIG volume storage_protocol "$TEMPEST_STORAGE_PROTOCOL"
     fi
 
+    if [[ $ENABLE_VOLUME_MULTIATTACH == "True" ]]; then
+        iniset $TEMPEST_CONFIG volume volume_type_multiattach $VOLUME_TYPE_MULTIATTACH
+    fi
+
     # Placement Features
     # Set the microversion range for placement.
     # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests.
@@ -607,14 +705,25 @@
     # If services enable the enforce_scope for their policy
     # we need to enable the same on Tempest side so that
     # test can be run with scoped token.
-    if [[ "$KEYSTONE_ENFORCE_SCOPE" == True ]] ; then
+    if [[ "$KEYSTONE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
         iniset $TEMPEST_CONFIG enforce_scope keystone true
-        iniset $TEMPEST_CONFIG auth admin_system 'all'
-        iniset $TEMPEST_CONFIG auth admin_project_name ''
     fi
-    iniset $TEMPEST_CONFIG enforce_scope glance "$GLANCE_ENFORCE_SCOPE"
 
-    iniset $TEMPEST_CONFIG enforce_scope cinder "$CINDER_ENFORCE_SCOPE"
+    if [[ "$NOVA_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $TEMPEST_CONFIG enforce_scope nova true
+    fi
+
+    if [[ "$PLACEMENT_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $TEMPEST_CONFIG enforce_scope placement true
+    fi
+
+    if [[ "$GLANCE_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $TEMPEST_CONFIG enforce_scope glance true
+    fi
+
+    if [[ "$CINDER_ENFORCE_SCOPE" == True || "$ENFORCE_SCOPE" == True ]] ; then
+        iniset $TEMPEST_CONFIG enforce_scope cinder true
+    fi
 
     if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then
         # libvirt-lxc does not support boot from volume or attaching volumes
@@ -629,13 +738,13 @@
     local tmp_cfg_file
     tmp_cfg_file=$(mktemp)
     cd $TEMPEST_DIR
-    if [[ "$OFFLINE" != "True" ]]; then
-        tox -revenv-tempest --notest
-    fi
 
     local tmp_u_c_m
     tmp_u_c_m=$(mktemp -t tempest_u_c_m.XXXXXXXXXX)
     set_tempest_venv_constraints $tmp_u_c_m
+    if [[ "$OFFLINE" != "True" ]]; then
+        tox -revenv-tempest --notest
+    fi
     tox -evenv-tempest -- pip install -c $tmp_u_c_m -r requirements.txt
     rm -f $tmp_u_c_m
 
@@ -663,12 +772,12 @@
     # Neutron API Extensions
 
     # disable metering if we didn't enable the service
-    if ! is_service_enabled q-metering; then
+    if ! is_service_enabled q-metering neutron-metering; then
         DISABLE_NETWORK_API_EXTENSIONS+=", metering"
     fi
 
     # disable l3_agent_scheduler if we didn't enable L3 agent
-    if ! is_service_enabled q-l3; then
+    if ! is_service_enabled q-l3 neutron-l3; then
         DISABLE_NETWORK_API_EXTENSIONS+=", l3_agent_scheduler"
     fi
 
@@ -709,7 +818,12 @@
 # install_tempest() - Collect source and prepare
 function install_tempest {
     git_clone $TEMPEST_REPO $TEMPEST_DIR $TEMPEST_BRANCH
-    pip_install 'tox!=2.8.0'
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 was
+    # released after zed and has some incompatible changes, and it is ok not
+    # to fix the issues caused by tox 4.0.0 in stable branch jobs. We can
+    # continue testing the stable/zed and lower branches with tox<4.0.0.
+    pip_install 'tox!=2.8.0,<4.0.0'
     pushd $TEMPEST_DIR
     # NOTE(gmann): checkout the TEMPEST_BRANCH in case TEMPEST_BRANCH
     # is tag name not master. git_clone would not checkout tag because
diff --git a/lib/tls b/lib/tls
index 5a7f5ae..0a598e1 100644
--- a/lib/tls
+++ b/lib/tls
@@ -212,9 +212,6 @@
     if is_fedora; then
         sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/ca-trust-source/anchors/devstack-chain.pem
         sudo update-ca-trust
-    elif is_suse; then
-        sudo cp $INT_CA_DIR/ca-chain.pem /usr/share/pki/trust/anchors/devstack-chain.pem
-        sudo update-ca-certificates
     elif is_ubuntu; then
         sudo cp $INT_CA_DIR/ca-chain.pem /usr/local/share/ca-certificates/devstack-int.crt
         sudo cp $ROOT_CA_DIR/cacert.pem /usr/local/share/ca-certificates/devstack-root.crt
@@ -367,8 +364,11 @@
 function fix_system_ca_bundle_path {
     if is_service_enabled tls-proxy; then
         local capath
-        capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
-
+        if [[ "$GLOBAL_VENV" == "True" ]] ; then
+            capath=$($DEVSTACK_VENV/bin/python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+        else
+            capath=$(python3 -c $'try:\n from requests import certs\n print (certs.where())\nexcept ImportError: pass')
+        fi
         if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then
             if is_fedora; then
                 sudo rm -f $capath
@@ -376,9 +376,6 @@
             elif is_ubuntu; then
                 sudo rm -f $capath
                 sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath
-            elif is_suse; then
-                sudo rm -f $capath
-                sudo ln -s /etc/ssl/ca-bundle.pem $capath
             else
                 echo "Don't know how to set the CA bundle, expect the install to fail."
             fi
@@ -441,9 +438,6 @@
 
     if is_ubuntu; then
         sudo a2enmod ssl
-    elif is_suse; then
-        sudo a2enmod ssl
-        sudo a2enflag SSL
     elif is_fedora; then
         # Fedora enables mod_ssl by default
         :
@@ -536,6 +530,7 @@
 <VirtualHost $f_host:$f_port>
     SSLEngine On
     SSLCertificateFile $DEVSTACK_CERT
+    SSLProtocol -all +TLSv1.3 +TLSv1.2
 
     # Disable KeepAlive to fix bug #1630664 a.k.a the
     # ('Connection aborted.', BadStatusLine("''",)) error
@@ -549,20 +544,19 @@
 
     # Avoid races (at the cost of performance) to re-use a pooled connection
     # where the connection is closed (bug 1807518).
+    # Set acquire=1 to disable waiting for connection pool members so that
+    # we can determine when apache is overloaded (returns 503).
     SetEnv proxy-initial-not-pooled
     <Location />
-        ProxyPass http://$b_host:$b_port/ retry=0 nocanon
+        ProxyPass http://$b_host:$b_port/ retry=0 nocanon acquire=1
         ProxyPassReverse http://$b_host:$b_port/
     </Location>
     ErrorLog $APACHE_LOG_DIR/tls-proxy_error.log
     ErrorLogFormat "%{cu}t [%-m:%l] [pid %P:tid %T] %7F: %E: [client\ %a] [frontend\ %A] %M% ,\ referer\ %{Referer}i"
     LogLevel info
-    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log "%{%Y-%m-%d}t %{%T}t.%{msec_frac}t [%l] %a \"%r\" %>s %b"
+    CustomLog $APACHE_LOG_DIR/tls-proxy_access.log combined
 </VirtualHost>
 EOF
-    if is_suse ; then
-        sudo a2enflag SSL
-    fi
     for mod in headers ssl proxy proxy_http; do
         enable_apache_mod $mod
     done
diff --git a/openrc b/openrc
index 6d488bb..5ec7634 100644
--- a/openrc
+++ b/openrc
@@ -7,9 +7,6 @@
 #   Set OS_USERNAME to override the default user name 'demo'
 #   Set ADMIN_PASSWORD to set the password for 'admin' and 'demo'
 
-# NOTE: support for the old NOVA_* novaclient environment variables has
-# been removed.
-
 if [[ -n "$1" ]]; then
     OS_USERNAME=$1
 fi
@@ -35,26 +32,11 @@
 # Get some necessary configuration
 source $RC_DIR/lib/tls
 
-# The OpenStack ecosystem has standardized the term **project** as the
-# entity that owns resources.  In some places **tenant** remains
-# referenced, but in all cases this just means **project**.  We will
-# warn if we need to turn on legacy **tenant** support to have a
-# working environment.
+# Minimal configuration
+export OS_AUTH_TYPE=password
 export OS_PROJECT_NAME=${OS_PROJECT_NAME:-demo}
-
-echo "WARNING: setting legacy OS_TENANT_NAME to support cli tools."
-export OS_TENANT_NAME=$OS_PROJECT_NAME
-
-# In addition to the owning entity (project), nova stores the entity performing
-# the action as the **user**.
 export OS_USERNAME=${OS_USERNAME:-demo}
-
-# With Keystone you pass the keystone password instead of an api key.
-# Recent versions of novaclient use OS_PASSWORD instead of NOVA_API_KEYs
-# or NOVA_PASSWORD.
 export OS_PASSWORD=${ADMIN_PASSWORD:-secret}
-
-# Region
 export OS_REGION_NAME=${REGION_NAME:-RegionOne}
 
 # Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION
@@ -73,30 +55,14 @@
     GLANCE_HOST=${GLANCE_HOST:-$HOST_IP}
 fi
 
-# Identity API version
-export OS_IDENTITY_API_VERSION=3
-
-# Ask keystoneauth1 to use keystone
-export OS_AUTH_TYPE=password
-
-# Authenticating against an OpenStack cloud using Keystone returns a **Token**
-# and **Service Catalog**.  The catalog contains the endpoints for all services
-# the user/project has access to - including nova, glance, keystone, swift, ...
-# We currently recommend using the version 3 *identity api*.
-#
-
 # If you don't have a working .stackenv, this is the backup position
 KEYSTONE_BACKUP=$SERVICE_PROTOCOL://$SERVICE_HOST:5000
 KEYSTONE_SERVICE_URI=${KEYSTONE_SERVICE_URI:-$KEYSTONE_BACKUP}
 
 export OS_AUTH_URL=${OS_AUTH_URL:-$KEYSTONE_SERVICE_URI}
 
-# Currently, in order to use openstackclient with Identity API v3,
-# we need to set the domain which the user and project belong to.
-if [ "$OS_IDENTITY_API_VERSION" = "3" ]; then
-    export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"}
-    export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"}
-fi
+export OS_USER_DOMAIN_ID=${OS_USER_DOMAIN_ID:-"default"}
+export OS_PROJECT_DOMAIN_ID=${OS_PROJECT_DOMAIN_ID:-"default"}
 
 # Set OS_CACERT to a default CA certificate chain if it exists.
 if [[ ! -v OS_CACERT ]] ; then
diff --git a/playbooks/post.yaml b/playbooks/post.yaml
index 9e66f20..0047d78 100644
--- a/playbooks/post.yaml
+++ b/playbooks/post.yaml
@@ -17,9 +17,18 @@
         dest: "{{ stage_dir }}/verify_tempest_conf.log"
         state: hard
       when: tempest_log.stat.exists
+    - name: Capture most recent qemu crash dump, if any
+      shell:
+        executable: /bin/bash
+        cmd: |
+          coredumpctl -o {{ devstack_log_dir }}/qemu.coredump dump /usr/bin/qemu-system-x86_64
+      ignore_errors: yes
   roles:
     - export-devstack-journal
     - apache-logs-conf
+    # This should run as early as possible to make sure we don't skew
+    # the post-tempest results with other activities.
+    - capture-performance-data
     - devstack-project-conf
     # capture-system-logs should be the last role before stage-output
     - capture-system-logs
diff --git a/playbooks/tox/pre.yaml b/playbooks/tox/pre.yaml
index d7e4670..68d5254 100644
--- a/playbooks/tox/pre.yaml
+++ b/playbooks/tox/pre.yaml
@@ -5,4 +5,10 @@
       bindep_profile: test
       bindep_dir: "{{ zuul_work_dir }}"
     - test-setup
-    - ensure-tox
+    # NOTE(gmann): Pinning tox<4.0.0 for stable/zed and lower. Tox 4.0.0 was
+    # released after zed and has some incompatible changes, and it is ok not
+    # to fix the issues caused by tox 4.0.0 in stable branch jobs. We can
+    # continue testing the stable/zed and lower branches with tox<4.0.0.
+    - role: ensure-tox
+      ensure_tox_version: "<4"
diff --git a/roles/capture-performance-data/README.rst b/roles/capture-performance-data/README.rst
new file mode 100644
index 0000000..b7a37c2
--- /dev/null
+++ b/roles/capture-performance-data/README.rst
@@ -0,0 +1,25 @@
+Generate performance logs for staging
+
+Captures usage information from mysql, systemd, apache logs, and other
+parts of the system and generates a performance.json file in the
+staging directory.
+
+**Role Variables**
+
+.. zuul:rolevar:: stage_dir
+   :default: {{ ansible_user_dir }}
+
+   The base stage directory
+
+.. zuul:rolevar:: devstack_conf_dir
+   :default: /opt/stack
+
+   The base devstack destination directory
+
+.. zuul:rolevar:: debian_suse_apache_deref_logs
+
+   The apache logs found in the debian/suse locations
+
+.. zuul:rolevar:: redhat_apache_deref_logs
+
+   The apache logs found in the redhat locations
diff --git a/roles/capture-performance-data/defaults/main.yaml b/roles/capture-performance-data/defaults/main.yaml
new file mode 100644
index 0000000..7bd79f4
--- /dev/null
+++ b/roles/capture-performance-data/defaults/main.yaml
@@ -0,0 +1,3 @@
+devstack_base_dir: /opt/stack
+devstack_conf_dir: "{{ devstack_base_dir }}"
+stage_dir: "{{ ansible_user_dir }}"
diff --git a/roles/capture-performance-data/tasks/main.yaml b/roles/capture-performance-data/tasks/main.yaml
new file mode 100644
index 0000000..51a11b6
--- /dev/null
+++ b/roles/capture-performance-data/tasks/main.yaml
@@ -0,0 +1,18 @@
+- name: Generate statistics
+  shell:
+    executable: /bin/bash
+    cmd: |
+      source {{ devstack_conf_dir }}/stackrc
+      source {{ devstack_conf_dir }}/inc/python
+      setup_devstack_virtualenv
+      $PYTHON {{ devstack_conf_dir }}/tools/get-stats.py \
+        --db-user="$DATABASE_USER" \
+        --db-pass="$DATABASE_PASSWORD" \
+        --db-host="$DATABASE_HOST" \
+        {{ apache_logs }} > {{ stage_dir }}/performance.json
+  vars:
+    apache_logs: >-
+      {% for i in debian_suse_apache_deref_logs.results | default([]) + redhat_apache_deref_logs.results | default([]) %}
+      --apache-log="{{ i.stat.path }}"
+      {% endfor %}
+  ignore_errors: yes
diff --git a/roles/capture-system-logs/README.rst b/roles/capture-system-logs/README.rst
index c284124..1376f63 100644
--- a/roles/capture-system-logs/README.rst
+++ b/roles/capture-system-logs/README.rst
@@ -9,6 +9,7 @@
 - coredumps
 - dns resolver
 - listen53
+- services
 - unbound.log
 - deprecation messages
 
diff --git a/roles/capture-system-logs/tasks/main.yaml b/roles/capture-system-logs/tasks/main.yaml
index 905806d..77b5ec5 100644
--- a/roles/capture-system-logs/tasks/main.yaml
+++ b/roles/capture-system-logs/tasks/main.yaml
@@ -19,6 +19,9 @@
           rpm -qa | sort > {{ stage_dir }}/rpm-qa.txt
       fi
 
+      # Services status
+      sudo systemctl status --all > services.txt 2>/dev/null
+
       # NOTE(kchamart) The 'audit.log' can be useful in cases when QEMU
       # failed to start due to denials from SELinux — useful for CentOS
       # and Fedora machines.  For Ubuntu (which runs AppArmor), DevStack
diff --git a/roles/devstack-ipv6-only-deployments-verification/README.rst b/roles/devstack-ipv6-only-deployments-verification/README.rst
index 400a8da..3bddf5e 100644
--- a/roles/devstack-ipv6-only-deployments-verification/README.rst
+++ b/roles/devstack-ipv6-only-deployments-verification/README.rst
@@ -1,10 +1,10 @@
-Verify the IPv6-only deployments
+Verify all addresses in IPv6-only deployments
 
 This role needs to be invoked from a playbook that
-run tests. This role verifies the IPv6 setting on
-devstack side and devstack deploy services on IPv6.
-This role is invoked before tests are run so that
-if any missing IPv6 setting or deployments can fail
+runs tests. This role verifies the IPv6 settings on the
+devstack side and that devstack deploys with all addresses
+being IPv6. This role is invoked before tests are run so that
+if there is any missing IPv6 setting, deployments can fail
 the job early.
 
 
diff --git a/roles/setup-devstack-source-dirs/tasks/main.yaml b/roles/setup-devstack-source-dirs/tasks/main.yaml
index 294c29c..cb7c6e3 100644
--- a/roles/setup-devstack-source-dirs/tasks/main.yaml
+++ b/roles/setup-devstack-source-dirs/tasks/main.yaml
@@ -43,9 +43,9 @@
       base_branch={{ devstack_sources_branch }}
       if git branch -a | grep "$base_branch" > /dev/null ; then
           git checkout $base_branch
-      elif [[ "$base_branch" == stable/* ]]; then
+      elif [[ "$base_branch" == stable/* ]] || [[ "$base_branch" == unmaintained/* ]]; then
           # Look for an eol tag for the stable branch.
-          eol_tag=${base_branch#stable/}-eol
+          eol_tag="${base_branch#*/}-eol"
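+          # e.g. stable/xena -> xena-eol and unmaintained/zed -> zed-eol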
           if git tag -l |grep $eol_tag >/dev/null; then
               git checkout $eol_tag
               git reset --hard $eol_tag
diff --git a/samples/local.conf b/samples/local.conf
index 8b76137..55b7298 100644
--- a/samples/local.conf
+++ b/samples/local.conf
@@ -49,7 +49,7 @@
 # path of the destination log file.  A timestamp will be appended to the given name.
 LOGFILE=$DEST/logs/stack.sh.log
 
-# Old log files are automatically removed after 7 days to keep things neat.  Change
+# Old log files are automatically removed after 2 days to keep things neat.  Change
 # the number of days by setting ``LOGDAYS``.
 LOGDAYS=2
 
diff --git a/samples/local.sh b/samples/local.sh
index a1c5c81..7e6ae70 100755
--- a/samples/local.sh
+++ b/samples/local.sh
@@ -31,7 +31,7 @@
     # ``demo``)
 
     # Get OpenStack user auth
-    source $TOP_DIR/openrc
+    export OS_CLOUD=devstack
 
     # Add first keypair found in localhost:$HOME/.ssh
     for i in $HOME/.ssh/id_rsa.pub $HOME/.ssh/id_dsa.pub; do
diff --git a/stack.sh b/stack.sh
index 6e9ced9..dcfd398 100755
--- a/stack.sh
+++ b/stack.sh
@@ -1,5 +1,6 @@
 #!/usr/bin/env bash
 
+
 # ``stack.sh`` is an opinionated OpenStack developer installation.  It
 # installs and configures various combinations of **Cinder**, **Glance**,
 # **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift**
@@ -12,7 +13,7 @@
 # a multi-node developer install.
 
 # To keep this script simple we assume you are running on a recent **Ubuntu**
-# (Bionic or newer), **Fedora** (F24 or newer), or **CentOS/RHEL**
+# (Bionic or newer) or **CentOS/RHEL/RockyLinux**
 # (7 or newer) machine. (It may work on other platforms but support for those
 # platforms is left to those who added them to DevStack.) It should work in
 # a VM or physical server. Additionally, we maintain a list of ``deb`` and
@@ -229,7 +230,7 @@
 
 # Warn users who aren't on an explicitly supported distro, but allow them to
 # override check and attempt installation with ``FORCE=yes ./stack``
-SUPPORTED_DISTROS="bullseye|focal|f35|opensuse-15.2|opensuse-tumbleweed|rhel8|rhel9|openEuler-20.03"
+SUPPORTED_DISTROS="bookworm|jammy|noble|rhel9"
 
 if [[ ! ${DISTRO} =~ $SUPPORTED_DISTROS ]]; then
     echo "WARNING: this script has not been tested on $DISTRO"
@@ -280,13 +281,6 @@
 sudo chown root:root $TEMPFILE
 sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh
 
-# TODO(wxy): Currently some base packages are not installed by default in
-# openEuler. Remove the code below once the packaged are installed by default
-# in the future.
-if [[ $DISTRO == "openEuler-20.03" ]]; then
-    install_package hostname
-fi
-
 # Configure Distro Repositories
 # -----------------------------
 
@@ -308,17 +302,15 @@
 }
 
 function _install_rdo {
-    if [[ $DISTRO == "rhel8" ]]; then
+    if [[ $DISTRO == "rhel9" ]]; then
+        rdo_release=${TARGET_BRANCH#*/}
         if [[ "$TARGET_BRANCH" == "master" ]]; then
-            # rdo-release.el8.rpm points to latest RDO release, use that for master
-            sudo dnf -y install https://rdoproject.org/repos/rdo-release.el8.rpm
+            # adding delorean-deps repo to provide current master rpms
+            sudo wget https://trunk.rdoproject.org/centos9-master/delorean-deps.repo -O /etc/yum.repos.d/delorean-deps.repo
         else
-            # For stable branches use corresponding release rpm
-            rdo_release=$(echo $TARGET_BRANCH | sed "s|stable/||g")
-            sudo dnf -y install https://rdoproject.org/repos/openstack-${rdo_release}/rdo-release-${rdo_release}.el8.rpm
+            # For stable/unmaintained branches use corresponding release rpm
+            sudo dnf -y install centos-release-openstack-${rdo_release}
         fi
-    elif [[ $DISTRO == "rhel9" ]]; then
-        sudo curl -L -o /etc/yum.repos.d/delorean-deps.repo http://trunk.rdoproject.org/centos9-master/delorean-deps.repo
     fi
     sudo dnf -y update
 }
@@ -341,7 +333,9 @@
 
 # Destination path for devstack logs
 if [[ -n ${LOGDIR:-} ]]; then
-    mkdir -p $LOGDIR
+    sudo mkdir -p $LOGDIR
+    safe_chown -R $STACK_USER $LOGDIR
+    safe_chmod 0755 $LOGDIR
 fi
 
 # Destination path for service data
@@ -398,9 +392,32 @@
     # Patch: https://github.com/rpm-software-management/dnf/pull/1448
     echo "[]" | sudo tee /var/cache/dnf/expired_repos.json
 elif [[ $DISTRO == "rhel9" ]]; then
+    # for CentOS Stream 9 repository
     sudo dnf config-manager --set-enabled crb
+    # for RHEL 9 repository
+    sudo dnf config-manager --set-enabled codeready-builder-for-rhel-9-x86_64-rpms
     # rabbitmq and other packages are provided by RDO repositories.
     _install_rdo
+
+    # Some distributions (Rocky Linux 9) provide curl-minimal instead of curl,
+    # it triggers a conflict when devstack wants to install "curl".
+    # Swap curl-minimal with curl.
+    if is_package_installed curl-minimal; then
+        sudo dnf swap -y curl-minimal curl
+    fi
+elif [[ $DISTRO == "openEuler-22.03" ]]; then
+    # There are some problems in openEuler that we have to fix here. Some of the
+    # required packages/actions run before the fixup script, so we can't fix them there.
+    #
+    # 1. the hostname package is not installed by default
+    # 2. Some necessary packages are in openstack repo, for example liberasurecode-devel
+    # 3. python3-pip can be uninstalled by `get_pip.py` automatically.
+    # 4. Ensure wget installation before use
+    install_package hostname openstack-release-wallaby wget
+    uninstall_package python3-pip
+
+    # Add yum repository for libvirt7.X
+    sudo wget https://eur.openeuler.openatom.cn/coprs/g/sig-openstack/Libvirt-7.X/repo/openeuler-22.03_LTS/group_sig-openstack-Libvirt-7.X-openeuler-22.03_LTS.repo -O /etc/yum.repos.d/libvirt7.2.0.repo
 fi
 
 # Ensure python is installed
@@ -585,6 +602,12 @@
 source $TOP_DIR/lib/database
 source $TOP_DIR/lib/rpc_backend
 
+# load host tuning functions and defaults
+source $TOP_DIR/lib/host
+# tune host memory early to ensure zswap/ksm are configured before
+# doing memory intensive operation like cloning repos or unpacking packages.
+tune_host
+
 # Configure Projects
 # ==================
 
@@ -797,6 +820,20 @@
 source $TOP_DIR/tools/fixup_stuff.sh
 fixup_all
 
+if [[ "$GLOBAL_VENV" == "True" ]] ; then
+    # TODO(frickler): find a better solution for this
+    sudo ln -sf /opt/stack/data/venv/bin/cinder-manage /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/cinder-rtstool /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/glance /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/nova-manage /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/openstack /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/privsep-helper /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/rally /usr/local/bin
+    sudo ln -sf /opt/stack/data/venv/bin/tox /usr/local/bin
+
+    setup_devstack_virtualenv
+fi
+
 # Install subunit for the subunit output stream
 pip_install -U os-testr
 
@@ -976,6 +1013,9 @@
     setup_dev_lib "python-openstackclient"
 else
     pip_install_gr python-openstackclient
+    if is_service_enabled openstack-cli-server; then
+        install_openstack_cli_server
+    fi
 fi
 
 # Installs alias for osc so that we can collect timing for all
@@ -1040,22 +1080,6 @@
 # Save configuration values
 save_stackenv $LINENO
 
-# Kernel Samepage Merging (KSM)
-# -----------------------------
-
-# Processes that mark their memory as mergeable can share identical memory
-# pages if KSM is enabled. This is particularly useful for nova + libvirt
-# backends but any other setup that marks its memory as mergeable can take
-# advantage. The drawback is there is higher cpu load; however, we tend to
-# be memory bound not cpu bound so enable KSM by default but allow people
-# to opt out if the CPU time is more important to them.
-
-if [[ $ENABLE_KSM == "True" ]] ; then
-    if [[ -f /sys/kernel/mm/ksm/run ]] ; then
-        sudo sh -c "echo 1 > /sys/kernel/mm/ksm/run"
-    fi
-fi
-
 
 # Start Services
 # ==============
@@ -1159,7 +1183,8 @@
 # ----
 
 if is_service_enabled q-dhcp; then
-    # Delete traces of nova networks from prior runs
+    # TODO(frickler): These are remnants from n-net, check which parts are really
+    # still needed for Neutron.
     # Do not kill any dnsmasq instance spawned by NetworkManager
     netman_pid=$(pidof NetworkManager || true)
     if [ -z "$netman_pid" ]; then
@@ -1219,12 +1244,7 @@
     echo_summary "Configuring Nova"
     init_nova
 
-    # Additional Nova configuration that is dependent on other services
-    # TODO(stephenfin): Is it possible for neutron to *not* be enabled now? If
-    # not, remove the if here
-    if is_service_enabled neutron; then
-        async_runfunc configure_neutron_nova
-    fi
+    async_runfunc configure_neutron_nova
 fi
 
 
@@ -1512,6 +1532,19 @@
 time_totals
 async_print_timing
 
+if is_service_enabled mysql; then
+    if [[ "$MYSQL_GATHER_PERFORMANCE" == "True" && "$MYSQL_HOST" ]]; then
+        echo ""
+        echo ""
+        echo "Post-stack database query stats:"
+        mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+              'SELECT * FROM queries' -t 2>/dev/null
+        mysql -u $DATABASE_USER -p$DATABASE_PASSWORD -h $MYSQL_HOST stats -e \
+              'DELETE FROM queries' 2>/dev/null
+    fi
+fi
+
+
 # Using the cloud
 # ===============
 
diff --git a/stackrc b/stackrc
index d22fa88..0b3e1c6 100644
--- a/stackrc
+++ b/stackrc
@@ -121,24 +121,11 @@
     SYSTEMCTL="sudo systemctl"
 fi
 
-
-# Whether or not to enable Kernel Samepage Merging (KSM) if available.
-# This allows programs that mark their memory as mergeable to share
-# memory pages if they are identical. This is particularly useful with
-# libvirt backends. This reduces memory usage at the cost of CPU overhead
-# to scan memory. We default to enabling it because we tend to be more
-# memory constrained than CPU bound.
-ENABLE_KSM=$(trueorfalse True ENABLE_KSM)
-
 # Passwords generated by interactive devstack runs
 if [[ -r $RC_DIR/.localrc.password ]]; then
     source $RC_DIR/.localrc.password
 fi
 
-# Control whether Python 3 should be used at all.
-# TODO(frickler): Drop this when all consumers are fixed
-export USE_PYTHON3=True
-
 # Adding the specific version of Python 3 to this variable will install
 # the app using that version of the interpreter instead of just 3.
 _DEFAULT_PYTHON3_VERSION="$(_get_python_version python3)"
@@ -175,9 +162,26 @@
     export PS4='+ $(short_source):   '
 fi
 
-# Configure Identity API version
-# TODO(frickler): Drop this when plugins no longer need it
-IDENTITY_API_VERSION=3
+# Global option for enforcing scope. If enabled, ENFORCE_SCOPE overrides
+# each service's ${SERVICE}_ENFORCE_SCOPE variable.
+ENFORCE_SCOPE=$(trueorfalse False ENFORCE_SCOPE)
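+# For example, setting ENFORCE_SCOPE=True in local.conf turns on enforce_scope
+# for keystone, nova, placement, glance and cinder at once in the tempest
+# config, instead of setting each ${SERVICE}_ENFORCE_SCOPE individually.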
+
+# Devstack supports the use of a global virtualenv. These variables enable
+# and disable this functionality as well as set the path to the virtualenv.
+# Note that the DATA_DIR is selected because grenade testing uses a shared
+# DATA_DIR but different DEST dirs; we don't want two sets of venvs but
+# one global set.
+DEVSTACK_VENV=${DEVSTACK_VENV:-$DATA_DIR/venv}
+
+# NOTE(kopecmartin): remove this once this is fixed
+# https://bugs.launchpad.net/devstack/+bug/2031639
+# This couldn't go into fixup_stuff as that is called after project
+# settings (e.g. certain paths) are derived taking GLOBAL_VENV into account
+if [[ "$os_VENDOR" =~ (CentOSStream|Rocky) ]]; then
+    GLOBAL_VENV=$(trueorfalse False GLOBAL_VENV)
+else
+    GLOBAL_VENV=$(trueorfalse True GLOBAL_VENV)
+fi
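+# With the defaults above the shared venv ends up under $DATA_DIR/venv
+# (typically /opt/stack/data/venv); when GLOBAL_VENV is enabled, stack.sh
+# symlinks a few of its binaries such as "openstack" and "nova-manage"
+# into /usr/local/bin.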
 
 # Enable use of Python virtual environments.  Individual project use of
 # venvs are controlled by the PROJECT_VENV array; every project with
@@ -186,13 +190,23 @@
 USE_VENV=$(trueorfalse False USE_VENV)
 
 # Add packages that need to be installed into a venv but are not in any
-# requirmenets files here, in a comma-separated list
-ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""}
+# requirements files here, in a comma-separated list.
+# Currently only used when USE_VENV is true (individual project venvs)
+ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""}
 
 # This can be used to turn database query logging on and off
 # (currently only implemented for MySQL backend)
 DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING)
 
+# This can be used to turn on various non-default items in the
+# performance_schema that are of interest to us
+MYSQL_GATHER_PERFORMANCE=$(trueorfalse True MYSQL_GATHER_PERFORMANCE)
+
+# This can be used to reduce the amount of memory mysqld uses while running.
+# These are unscientifically determined, and could reduce performance or
+# cause other issues.
+MYSQL_REDUCE_MEMORY=$(trueorfalse True MYSQL_REDUCE_MEMORY)
+
 # Set a timeout for git operations.  If git is still running when the
 # timeout expires, the command will be retried up to 3 times.  This is
 # in the format for timeout(1);
@@ -235,7 +249,7 @@
 # Setting the variable to 'ALL' will activate the download for all
 # libraries.
 
-DEVSTACK_SERIES="zed"
+DEVSTACK_SERIES="2024.2"
 
 ##############
 #
@@ -290,6 +304,9 @@
 TEMPEST_BRANCH=${TEMPEST_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
 TEMPEST_VENV_UPPER_CONSTRAINTS=${TEMPEST_VENV_UPPER_CONSTRAINTS:-master}
 
+OSTESTIMAGES_REPO=${OSTESTIMAGES_REPO:-${GIT_BASE}/openstack/os-test-images.git}
+OSTESTIMAGES_BRANCH=${OSTESTIMAGES_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
+OSTESTIMAGES_DIR=${DEST}/os-test-images
 
 ##############
 #
@@ -558,28 +575,6 @@
 GITBRANCH["os-ken"]=${OS_KEN_BRANCH:-$TARGET_BRANCH}
 GITDIR["os-ken"]=$DEST/os-ken
 
-##################
-#
-#  TripleO / Heat Agent Components
-#
-##################
-
-# run-parts script required by os-refresh-config
-DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git}
-DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-$BRANCHLESS_TARGET_BRANCH}
-
-# os-apply-config configuration template tool
-OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git}
-OAC_BRANCH=${OAC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
-# os-collect-config configuration agent
-OCC_REPO=${OCC_REPO:-${GIT_BASE}/openstack/os-collect-config.git}
-OCC_BRANCH=${OCC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
-# os-refresh-config configuration run-parts tool
-ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git}
-ORC_BRANCH=${ORC_BRANCH:-$TRAILING_TARGET_BRANCH}
-
 
 #################
 #
@@ -649,20 +644,19 @@
 #     If the file ends in .tar.gz, uncompress the tarball and select the first
 #     .img file inside it as the image.  If present, use "*-vmlinuz*" as the kernel
 #     and "*-initrd*" as the ramdisk
-#     example: http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-amd64.tar.gz
+#     example: https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.tar.gz
 #  * disk image (*.img,*.img.gz)
 #    if file ends in .img, then it will be uploaded and registered as a to
 #    glance as a disk image.  If it ends in .gz, it is uncompressed first.
 #    example:
-#      http://cloud-images.ubuntu.com/releases/precise/release/ubuntu-12.04-server-cloudimg-armel-disk1.img
-#      http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
+#      https://cloud-images.ubuntu.com/releases/jammy/release/ubuntu-22.04-server-cloudimg-amd64.img
+#      https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz
 #  * OpenVZ image:
 #    OpenVZ uses its own format of image, and does not support UEC style images
 
-#IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image
-#IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
+#IMAGE_URLS="https://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image
 
-CIRROS_VERSION=${CIRROS_VERSION:-"0.5.2"}
+CIRROS_VERSION=${CIRROS_VERSION:-"0.6.2"}
 CIRROS_ARCH=${CIRROS_ARCH:-$(uname -m)}
 
 # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of
@@ -679,11 +673,11 @@
                 lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc
                     DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs}
                     DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz}
-                    IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
+                    IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
                 *) # otherwise, use the qcow image
                     DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk}
                     DEFAULT_IMAGE_FILE_NAME=${DEFAULT_IMAGE_FILE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img}
-                    IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
+                    IMAGE_URLS+="https://download.cirros-cloud.net/${CIRROS_VERSION}/${DEFAULT_IMAGE_FILE_NAME}";;
                 esac
             ;;
         vsphere)
@@ -708,11 +702,11 @@
 EXTRA_CACHE_URLS=""
 
 # etcd3 defaults
-ETCD_VERSION=${ETCD_VERSION:-v3.3.12}
-ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"dc5d82df095dae0a2970e4d870b6929590689dd707ae3d33e7b86da0f7f211b6"}
-ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"170b848ac1a071fe7d495d404a868a2c0090750b2944f8a260ef1c6125b2b4f4"}
-ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"77f807b1b51abbf51e020bb05bdb8ce088cb58260fcd22749ea32eee710463d3"}
-# etcd v3.2.x doesn't have anything for s390x
+ETCD_VERSION=${ETCD_VERSION:-v3.4.27}
+ETCD_SHA256_AMD64=${ETCD_SHA256_AMD64:-"a32d21e006252dbc3405b0645ba8468021ed41376974b573285927bf39b39eb9"}
+ETCD_SHA256_ARM64=${ETCD_SHA256_ARM64:-"ed7e257c225b9b9545fac22246b97f4074a4b5109676e92dbaebfb9315b69cc0"}
+ETCD_SHA256_PPC64=${ETCD_SHA256_PPC64:-"eb8825e0bc2cbaf9e55947f5ee373ebc9ca43b6a2ea5ced3b992c81855fff37e"}
+# etcd v3.2.x and later doesn't have anything for s390x
 ETCD_SHA256_S390X=${ETCD_SHA256_S390X:-""}
 # Make sure etcd3 downloads the correct architecture
 if is_arch "x86_64"; then
@@ -792,7 +786,7 @@
 SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5}
 
 # Service graceful shutdown timeout
-WORKER_TIMEOUT=${WORKER_TIMEOUT:-90}
+WORKER_TIMEOUT=${WORKER_TIMEOUT:-80}
 
 # Common Configuration
 # --------------------
@@ -869,7 +863,31 @@
 # This is either 127.0.0.1 for IPv4 or ::1 for IPv6
 SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}}
 
-REGION_NAME=${REGION_NAME:-RegionOne}
+# TUNNEL IP version
+# This is the IP version to use for tunnel endpoints
+TUNNEL_IP_VERSION=${TUNNEL_IP_VERSION:-4}
+
+# Validate TUNNEL_IP_VERSION
+if [[ $TUNNEL_IP_VERSION != "4" ]] && [[ $TUNNEL_IP_VERSION != "6" ]]; then
+    die $LINENO "TUNNEL_IP_VERSION must be either 4 or 6"
+fi
+
+if [[ "$TUNNEL_IP_VERSION" == 4 ]]; then
+    DEF_TUNNEL_ENDPOINT_IP=$HOST_IP
+fi
+
+if [[ "$TUNNEL_IP_VERSION" == 6 ]]; then
+    # Only die if the user has not over-ridden the endpoint IP
+    if [[ "$HOST_IPV6" == "" ]] && [[ "$TUNNEL_ENDPOINT_IP" == "" ]]; then
+        die $LINENO "Could not determine host IPv6 address.  See local.conf for suggestions on setting HOST_IPV6."
+    fi
+
+    DEF_TUNNEL_ENDPOINT_IP=$HOST_IPV6
+fi
+
+# Allow the use of an alternate address for tunnel endpoints.
+# Default is dependent on TUNNEL_IP_VERSION above.
+TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-${DEF_TUNNEL_ENDPOINT_IP}}
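+# Example local.conf override for an IPv6 tunnel network (addresses are
+# illustrative only):
+#   TUNNEL_IP_VERSION=6
+#   TUNNEL_ENDPOINT_IP=fd00:dead:beef::1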
 
 # Configure services to use syslog instead of writing to individual log files
 SYSLOG=$(trueorfalse False SYSLOG)
diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh
index 6ed1647..6367cde 100755
--- a/tests/test_ini_config.sh
+++ b/tests/test_ini_config.sh
@@ -44,6 +44,9 @@
 multi = foo1
 multi = foo2
 
+[fff]
+ampersand =
+
 [key_with_spaces]
 rgw special key = something
 
@@ -85,7 +88,7 @@
 
 # test iniget_sections
 VAL=$(iniget_sections "${TEST_INI}")
-assert_equal "$VAL" "default aaa bbb ccc ddd eee key_with_spaces \
+assert_equal "$VAL" "default aaa bbb ccc ddd eee fff key_with_spaces \
 del_separate_options del_same_option del_missing_option \
 del_missing_option_multi del_no_options"
 
@@ -124,6 +127,13 @@
 VAL=$(iniget ${TEST_INI} bbb handlers)
 assert_equal "$VAL" "33,44" "inset at EOF"
 
+# Test with ampersand in values
+for i in `seq 3`; do
+    iniset ${TEST_INI} fff ampersand '&y'
+done
+VAL=$(iniget ${TEST_INI} fff ampersand)
+assert_equal "$VAL" "&y" "iniset ampersands in option"
+
 # test empty option
 if ini_has_option ${SUDO_ARG} ${TEST_INI} ddd empty; then
     passed "ini_has_option: ddd.empty present"
diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh
index bfc2a19..f221c82 100755
--- a/tests/test_package_ordering.sh
+++ b/tests/test_package_ordering.sh
@@ -8,7 +8,7 @@
 source $TOP/tests/unittest.sh
 
 export LC_ALL=en_US.UTF-8
-PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f)
+PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms -type f)
 
 TMPDIR=$(mktemp -d)
 
diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh
index 919cacb..cb8d7aa 100755
--- a/tools/create-stack-user.sh
+++ b/tools/create-stack-user.sh
@@ -44,6 +44,15 @@
 if ! getent passwd $STACK_USER >/dev/null; then
     echo "Creating a user called $STACK_USER"
     useradd -g $STACK_USER -s /bin/bash -d $DEST -m $STACK_USER
+    # RHEL based distros create the home dir with 700 permissions and
+    # Ubuntu 21.04+ with 750, i.e. the executable permission is missing
+    # for group and/or others. Devstack deployment will have issues with
+    # this, so fix it by adding the executable permission.
+    if [[ $(stat -c '%A' $DEST|grep -o x|wc -l) -lt 3 ]]; then
+        echo "Executable permission missing for $DEST, adding it"
+        chmod +x $DEST
+    fi
 fi
 
 echo "Giving stack user passwordless sudo privileges"
diff --git a/tools/dbcounter/dbcounter.py b/tools/dbcounter/dbcounter.py
new file mode 100644
index 0000000..86e5529
--- /dev/null
+++ b/tools/dbcounter/dbcounter.py
@@ -0,0 +1,121 @@
+import json
+import logging
+import os
+import threading
+import time
+import queue
+
+import sqlalchemy
+from sqlalchemy.engine import CreateEnginePlugin
+from sqlalchemy import event
+
+# https://docs.sqlalchemy.org/en/14/core/connections.html?
+# highlight=createengineplugin#sqlalchemy.engine.CreateEnginePlugin
+
+LOG = logging.getLogger(__name__)
+
+# The theory of operation here is that we register this plugin with
+# sqlalchemy via an entry_point. It gets loaded by virtue of plugin=
+# being in the database connection URL, which gives us an opportunity
+# to hook the engines that get created.
+#
+# We opportunistically spawn a thread, which we feed "hits" to over a
+# queue, and which occasionally writes those hits to a special
+# database called 'stats'. We access that database with the same user,
+# pass, and host as the main connection URL for simplicity.
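+#
+# For example (illustrative only), a connection URL such as
+#   mysql+pymysql://root:secret@127.0.0.1/nova?plugin=dbcounter
+# causes SQLAlchemy to load this plugin for that engine; the real URL and
+# credentials come from devstack's database configuration.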
+
+
+class LogCursorEventsPlugin(CreateEnginePlugin):
+    def __init__(self, url, kwargs):
+        self.db_name = url.database
+        LOG.info('Registered counter for database %s' % self.db_name)
+        new_url = sqlalchemy.engine.URL.create(url.drivername,
+                                               url.username,
+                                               url.password,
+                                               url.host,
+                                               url.port,
+                                               'stats')
+
+        self.engine = sqlalchemy.create_engine(new_url)
+        self.queue = queue.Queue()
+        self.thread = None
+
+    def update_url(self, url):
+        return url.difference_update_query(["dbcounter"])
+
+    def engine_created(self, engine):
+        """Hook the engine creation process.
+
+        This is the plug point for the sqlalchemy plugin. Using
+        plugin=$this in the URL causes this method to be called when
+        the engine is created, giving us a chance to hook it below.
+        """
+        event.listen(engine, "before_cursor_execute", self._log_event)
+
+    def ensure_writer_thread(self):
+        self.thread = threading.Thread(target=self.stat_writer, daemon=True)
+        self.thread.start()
+
+    def _log_event(self, conn, cursor, statement, parameters, context,
+                   executemany):
+        """Queue a "hit" for this operation to be recorded.
+
+        Attempts to determine the operation by the first word of the
+        statement, or 'OTHER' if it cannot be determined.
+        """
+
+        # Start our thread if not running. If we were forked after the
+        # engine was created and this plugin was associated, our
+        # writer thread is gone, so respawn.
+        if not self.thread or not self.thread.is_alive():
+            self.ensure_writer_thread()
+
+        try:
+            op = statement.strip().split(' ', 1)[0] or 'OTHER'
+        except Exception:
+            op = 'OTHER'
+
+        self.queue.put((self.db_name, op))
+
+    def do_incr(self, db, op, count):
+        """Increment the counter for (db,op) by count."""
+
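+        # This assumes a stats.queries table with a unique key on (db, op),
+        # presumably created by devstack's database setup; the upsert below
+        # then accumulates per-(db, op) counts.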
+        query = sqlalchemy.text('INSERT INTO queries (db, op, count) '
+                                '  VALUES (:db, :op, :count) '
+                                '  ON DUPLICATE KEY UPDATE count=count+:count')
+        try:
+            with self.engine.begin() as conn:
+                r = conn.execute(query, {'db': db, 'op': op, 'count': count})
+        except Exception as e:
+            LOG.error('Failed to account for access to database %r: %s',
+                      db, e)
+
+    def stat_writer(self):
+        """Consume messages from the queue and write them in batches.
+
+        This reads "hists" from from a queue fed by _log_event() and
+        writes (db,op)+=count stats to the database after ten seconds
+        of no activity to avoid triggering a write for every SELECT
+        call. Write no less often than every sixty seconds to avoid being
+        starved by constant activity.
+        """
+        LOG.debug('[%i] Writer thread running' % os.getpid())
+        while True:
+            to_write = {}
+            last = time.time()
+            while time.time() - last < 60:
+                try:
+                    item = self.queue.get(timeout=10)
+                    to_write.setdefault(item, 0)
+                    to_write[item] += 1
+                except queue.Empty:
+                    break
+
+            if to_write:
+                LOG.debug('[%i] Writing DB stats %s' % (
+                    os.getpid(),
+                    ','.join(['%s:%s=%i' % (db, op, count)
+                              for (db, op), count in to_write.items()])))
+
+            for (db, op), count in to_write.items():
+                self.do_incr(db, op, count)
diff --git a/tools/dbcounter/pyproject.toml b/tools/dbcounter/pyproject.toml
new file mode 100644
index 0000000..d74d688
--- /dev/null
+++ b/tools/dbcounter/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["sqlalchemy", "setuptools>=42"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/tools/dbcounter/setup.cfg b/tools/dbcounter/setup.cfg
new file mode 100644
index 0000000..12300bf
--- /dev/null
+++ b/tools/dbcounter/setup.cfg
@@ -0,0 +1,14 @@
+[metadata]
+name = dbcounter
+author = Dan Smith
+author_email = dms@danplanet.com
+version = 0.1
+description = A teeny tiny dbcounter plugin for use with devstack
+url = http://github.com/openstack/devstack
+license = Apache
+
+[options]
+py_modules = dbcounter
+entry_points =
+    [sqlalchemy.plugins]
+    dbcounter = dbcounter:LogCursorEventsPlugin
diff --git a/tools/file_tracker.sh b/tools/file_tracker.sh
new file mode 100755
index 0000000..9c31b30
--- /dev/null
+++ b/tools/file_tracker.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -o errexit
+
+# time to sleep between checks
+SLEEP_TIME=20
+
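+# Example usage (illustrative): poll every 60 seconds in the background:
+#   ./tools/file_tracker.sh -s 60 &
+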
+function tracker {
+    echo "Number of open files | Number of open files not in use | Maximum number of files allowed to be opened"
+    while true; do
+        cat /proc/sys/fs/file-nr
+        sleep $SLEEP_TIME
+    done
+}
+
+function usage {
+    echo "Usage: $0 [-x] [-s N]" 1>&2
+    exit 1
+}
+
+while getopts ":s:x" opt; do
+    case $opt in
+        s)
+            SLEEP_TIME=$OPTARG
+            ;;
+        x)
+            set -o xtrace
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+
+tracker
diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh
index f24ac40..faea44f 100755
--- a/tools/fixup_stuff.sh
+++ b/tools/fixup_stuff.sh
@@ -36,6 +36,12 @@
     # Disable selinux to avoid configuring to allow Apache access
     # to Horizon files (LP#1175444)
     if selinuxenabled; then
+        # persist selinux config across reboots
+        cat << EOF | sudo tee /etc/selinux/config
+SELINUX=permissive
+SELINUXTYPE=targeted
+EOF
+        # then disable at runtime
         sudo setenforce 0
     fi
 
@@ -90,45 +96,6 @@
     fi
 }
 
-function fixup_suse {
-    if ! is_suse; then
-        return
-    fi
-
-    # Deactivate and disable apparmor profiles in openSUSE and SLE
-    # distros to avoid issues with haproxy and dnsmasq.  In newer
-    # releases, systemctl stop apparmor is actually a no-op, so we
-    # have to use aa-teardown to make sure we've deactivated the
-    # profiles:
-    #
-    # https://www.suse.com/releasenotes/x86_64/SUSE-SLES/15/#fate-325343
-    # https://gitlab.com/apparmor/apparmor/merge_requests/81
-    # https://build.opensuse.org/package/view_file/openSUSE:Leap:15.2/apparmor/apparmor.service?expand=1
-    if sudo systemctl is-active -q apparmor; then
-        sudo systemctl stop apparmor
-    fi
-    if [ -x /usr/sbin/aa-teardown ]; then
-        sudo /usr/sbin/aa-teardown
-    fi
-    if sudo systemctl is-enabled -q apparmor; then
-        sudo systemctl disable apparmor
-    fi
-
-    # Since pip10, pip will refuse to uninstall files from packages
-    # that were created with distutils (rather than more modern
-    # setuptools).  This is because it technically doesn't have a
-    # manifest of what to remove.  However, in most cases, simply
-    # overwriting works.  So this hacks around those packages that
-    # have been dragged in by some other system dependency
-    sudo rm -rf /usr/lib/python3.6/site-packages/ply-*.egg-info
-    sudo rm -rf /usr/lib/python3.6/site-packages/six-*.egg-info
-
-    # Ensure trusted CA certificates are up to date
-    # See https://bugzilla.suse.com/show_bug.cgi?id=1154871
-    # May be removed once a new opensuse-15 image is available in nodepool
-    sudo zypper up -y p11-kit ca-certificates-mozilla
-}
-
 function fixup_ovn_centos {
     if [[ $os_VENDOR != "CentOS" ]]; then
         return
@@ -153,32 +120,7 @@
     sudo rm -rf /usr/lib/python3/dist-packages/simplejson-*.egg-info
 }
 
-function fixup_openeuler {
-    if ! is_openeuler; then
-        return
-    fi
-
-    if is_arch "x86_64"; then
-        arch="x86_64"
-    elif is_arch "aarch64"; then
-        arch="aarch64"
-    fi
-
-    # Some packages' version in openEuler are too old, use the newer ones we
-    # provide in oepkg. (oepkg is an openEuler third part yum repo which is
-    # endorsed by openEuler community)
-    (echo '[openstack-ci]'
-    echo 'name=openstack'
-    echo 'baseurl=https://repo.oepkgs.net/openEuler/rpm/openEuler-20.03-LTS-SP2/budding-openeuler/openstack-master-ci/'$arch'/'
-    echo 'enabled=1'
-    echo 'gpgcheck=0') | sudo tee -a /etc/yum.repos.d/openstack-master.repo > /dev/null
-
-    yum_install liberasurecode-devel
-}
-
 function fixup_all {
     fixup_ubuntu
     fixup_fedora
-    fixup_suse
-    fixup_openeuler
 }
diff --git a/tools/generate-devstack-plugins-list.py b/tools/generate-devstack-plugins-list.py
index 1cacd06..bc28515 100644
--- a/tools/generate-devstack-plugins-list.py
+++ b/tools/generate-devstack-plugins-list.py
@@ -73,8 +73,11 @@
 s = requests.Session()
 # sometimes gitea gives us a 500 error; retry sanely
 #  https://stackoverflow.com/a/35636367
+# We need to disable raise_on_status because if any repo ends up returning 500,
+# the propose-updates job which runs this script will fail.
 retries = Retry(total=3, backoff_factor=1,
-                status_forcelist=[ 500 ])
+                status_forcelist=[ 500 ],
+                raise_on_status=False)
 s.mount('https://', HTTPAdapter(max_retries=retries))
 
 found_plugins = filter(functools.partial(has_devstack_plugin, s), projects)
diff --git a/tools/get-stats.py b/tools/get-stats.py
new file mode 100755
index 0000000..b958af6
--- /dev/null
+++ b/tools/get-stats.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python3
+
+import argparse
+import csv
+import datetime
+import glob
+import itertools
+import json
+import logging
+import os
+import re
+import socket
+import subprocess
+import sys
+
+try:
+    import psutil
+except ImportError:
+    psutil = None
+    print('No psutil, process information will not be included',
+          file=sys.stderr)
+
+try:
+    import pymysql
+except ImportError:
+    pymysql = None
+    print('No pymysql, database information will not be included',
+          file=sys.stderr)
+
+LOG = logging.getLogger('perf')
+
+# https://www.elastic.co/blog/found-crash-elasticsearch#mapping-explosion
+
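+# Example invocation (illustrative; the actual credentials and log paths are
+# supplied by the capture-performance-data role):
+#   get-stats.py --db-user="$DATABASE_USER" --db-pass="$DATABASE_PASSWORD" \
+#     --db-host="$DATABASE_HOST" --apache-log=/var/log/apache2/access.log \
+#     > performance.json
+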
+
+def tryint(value):
+    try:
+        return int(value)
+    except (ValueError, TypeError):
+        return value
+
+
+def get_service_stats(service):
+    stats = {'MemoryCurrent': 0}
+    output = subprocess.check_output(['/usr/bin/systemctl', 'show', service] +
+                                     ['-p%s' % stat for stat in stats])
+    for line in output.decode().split('\n'):
+        if not line:
+            continue
+        stat, val = line.split('=')
+        stats[stat] = tryint(val)
+
+    return stats
+
+
+def get_services_stats():
+    services = [os.path.basename(s) for s in
+                glob.glob('/etc/systemd/system/devstack@*.service')] + \
+                ['apache2.service']
+    return [dict(service=service, **get_service_stats(service))
+            for service in services]
+
+
+def get_process_stats(proc):
+    cmdline = proc.cmdline()
+    if 'python' in cmdline[0]:
+        cmdline = cmdline[1:]
+    return {'cmd': cmdline[0],
+            'pid': proc.pid,
+            'args': ' '.join(cmdline[1:]),
+            'rss': proc.memory_info().rss}
+
+
+def get_processes_stats(matches):
+    me = os.getpid()
+    procs = psutil.process_iter()
+
+    def proc_matches(proc):
+        return me != proc.pid and any(
+            re.search(match, ' '.join(proc.cmdline()))
+            for match in matches)
+
+    return [
+        get_process_stats(proc)
+        for proc in procs
+        if proc_matches(proc)]
+
+
+def get_db_stats(host, user, passwd):
+    dbs = []
+    try:
+        db = pymysql.connect(host=host, user=user, password=passwd,
+                             database='stats',
+                             cursorclass=pymysql.cursors.DictCursor)
+    except pymysql.err.OperationalError as e:
+        if 'Unknown database' in str(e):
+            print('No stats database; assuming devstack failed',
+                  file=sys.stderr)
+            return []
+        raise
+
+    with db:
+        with db.cursor() as cur:
+            cur.execute('SELECT db,op,count FROM queries')
+            for row in cur:
+                dbs.append({k: tryint(v) for k, v in row.items()})
+    return dbs
+
+
+def get_http_stats_for_log(logfile):
+    stats = {}
+    apache_fields = ('host', 'a', 'b', 'date', 'tz', 'request', 'status',
+                     'length', 'c', 'agent')
+    ignore_agents = ('curl', 'uwsgi', 'nova-status')
+    ignored_services = set()
+    for line in csv.reader(open(logfile), delimiter=' '):
+        fields = dict(zip(apache_fields, line))
+        if len(fields) != len(apache_fields):
+            # Not a combined access log, so we can bail completely
+            return []
+        try:
+            method, url, http = fields['request'].split(' ')
+        except ValueError:
+            method = url = http = ''
+        if 'HTTP' not in http:
+            # Not a combined access log, so we can bail completely
+            return []
+
+        # Tempest's User-Agent is unchanged, but client libraries and
+        # inter-service API calls use proper strings, so assume
+        # 'python-urllib' is tempest in order to tell it apart.
+        if 'python-urllib' in fields['agent'].lower():
+            agent = 'tempest'
+        else:
+            agent = fields['agent'].split(' ')[0]
+            if agent.startswith('python-'):
+                agent = agent.replace('python-', '')
+            if '/' in agent:
+                agent = agent.split('/')[0]
+
+        if agent in ignore_agents:
+            continue
+
+        try:
+            service, rest = url.strip('/').split('/', 1)
+        except ValueError:
+            # Root calls like "GET /identity"
+            service = url.strip('/')
+            rest = ''
+
+        if not service.isalpha():
+            ignored_services.add(service)
+            continue
+
+        method_key = '%s-%s' % (agent, method)
+        try:
+            length = int(fields['length'])
+        except ValueError:
+            LOG.warning('[%s] Failed to parse length %r from line %r' % (
+                logfile, fields['length'], line))
+            length = 0
+        stats.setdefault(service, {'largest': 0})
+        stats[service].setdefault(method_key, 0)
+        stats[service][method_key] += 1
+        stats[service]['largest'] = max(stats[service]['largest'],
+                                        length)
+
+    if ignored_services:
+        LOG.warning('Ignored services: %s' % ','.join(
+            sorted(ignored_services)))
+
+    # Flatten this for ES
+    return [{'service': service, 'log': os.path.basename(logfile),
+             **vals}
+            for service, vals in stats.items()]
+
+
+def get_http_stats(logfiles):
+    return list(itertools.chain.from_iterable(get_http_stats_for_log(log)
+                                              for log in logfiles))
+
+
+def get_report_info():
+    return {
+        'timestamp': datetime.datetime.now().isoformat(),
+        'hostname': socket.gethostname(),
+        'version': 2,
+    }
+
+
+if __name__ == '__main__':
+    process_defaults = ['privsep', 'mysqld', 'erlang', 'etcd']
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--db-user', default='root',
+                        help=('MySQL user for collecting stats '
+                              '(default: "root")'))
+    parser.add_argument('--db-pass', default=None,
+                        help='MySQL password for db-user')
+    parser.add_argument('--db-host', default='localhost',
+                        help='MySQL hostname')
+    parser.add_argument('--apache-log', action='append', default=[],
+                        help='Collect API call stats from this apache log')
+    parser.add_argument('--process', action='append',
+                        default=process_defaults,
+                        help=('Include process stats for this cmdline regex '
+                              '(default is %s)' % ','.join(process_defaults)))
+    args = parser.parse_args()
+
+    logging.basicConfig(level=logging.WARNING)
+
+    data = {
+        'services': get_services_stats(),
+        'db': pymysql and args.db_pass and get_db_stats(args.db_host,
+                                                        args.db_user,
+                                                        args.db_pass) or [],
+        'processes': psutil and get_processes_stats(args.process) or [],
+        'api': get_http_stats(args.apache_log),
+        'report': get_report_info(),
+    }
+
+    print(json.dumps(data, indent=2))
diff --git a/tools/install_pip.sh b/tools/install_pip.sh
index e9c52ea..91b180c 100755
--- a/tools/install_pip.sh
+++ b/tools/install_pip.sh
@@ -139,15 +139,18 @@
     # recent enough anyway.  This is included via rpms/general
     : # Simply fall through
 elif is_ubuntu; then
-    : # pip on Ubuntu 20.04 is new enough, too
+    # pip on Ubuntu 20.04 and higher is new enough, too
+    # drop setuptools from u-c
+    sed -i -e '/setuptools/d' $REQUIREMENTS_DIR/upper-constraints.txt
 else
     install_get_pip
+
+    # Note setuptools is part of requirements.txt and we want to make sure
+    # we obey any versioning as described there.
+    pip_install_gr setuptools
 fi
 
 set -x
 
-# Note setuptools is part of requirements.txt and we want to make sure
-# we obey any versioning as described there.
-pip_install_gr setuptools
 
 get_versions
diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh
index a7c03d2..bb470b2 100755
--- a/tools/install_prereqs.sh
+++ b/tools/install_prereqs.sh
@@ -74,13 +74,13 @@
 if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then
     if is_ubuntu || is_fedora; then
         install_package rsyslog-relp
-    elif is_suse; then
-        install_package rsyslog-module-relp
     else
         exit_distro_not_supported "rsyslog-relp installation"
     fi
 fi
 
+# TODO(clarkb) remove these once we are switched to global venv by default
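+# Prefer the versioned interpreter when present, otherwise fall back to python3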
+export PYTHON=$(which python${PYTHON3_VERSION} 2>/dev/null || which python3 2>/dev/null)
 
 # Mark end of run
 # ---------------
diff --git a/tools/memory_tracker.sh b/tools/memory_tracker.sh
index 6c36534..2f404c2 100755
--- a/tools/memory_tracker.sh
+++ b/tools/memory_tracker.sh
@@ -14,7 +14,12 @@
 
 set -o errexit
 
-PYTHON=${PYTHON:-python3}
+# TODO(frickler): make this use stackrc variables
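+# /opt/stack/data/venv is devstack's global venv; prefer its interpreter when
+# it exists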
+if [ -x /opt/stack/data/venv/bin/python ]; then
+    PYTHON=/opt/stack/data/venv/bin/python
+else
+    PYTHON=${PYTHON:-python3}
+fi
 
 # time to sleep between checks
 SLEEP_TIME=20
diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh
index 73fe3f3..ab8e8df 100755
--- a/tools/ping_neutron.sh
+++ b/tools/ping_neutron.sh
@@ -30,7 +30,8 @@
 
 This provides a wrapper to ping neutron guests that are on isolated
 tenant networks that the caller can't normally reach. It does so by
-creating a network namespace probe.
+using either the DHCP or Metadata network namespace to support both
+ML2/OVS and OVN.
 
 It takes arguments like ping, except the first arg must be the network
 name.
@@ -44,6 +45,12 @@
     exit 1
 }
 
+# BUG: with duplicate network names, this fails pretty hard since it
+# will just pick the first match.
+function _get_net_id {
+    openstack --os-cloud devstack-admin --os-region-name="$REGION_NAME" \
+        --os-project-name admin --os-username admin \
+        --os-password "$ADMIN_PASSWORD" \
+        network list | grep "$1" | head -n 1 | awk '{print $2}'
+}
+
 NET_NAME=$1
 
 if [[ -z "$NET_NAME" ]]; then
@@ -53,12 +60,11 @@
 
 REMAINING_ARGS="${@:2}"
 
-# BUG: with duplicate network names, this fails pretty hard.
-NET_ID=$(openstack network show -f value -c id "$NET_NAME")
-PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1)
+NET_ID=$(_get_net_id "$NET_NAME")
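+# Find the qdhcp-/ovnmeta- namespace created for this network; keep only the
+# namespace name since "ip netns list" may append an "(id: N)" suffix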
+NET_NS=$(ip netns list | grep "$NET_ID" | head -n 1 | awk '{print $1}')
 
 # This runs a command inside the specific netns
-NET_NS_CMD="ip netns exec qprobe-$PROBE_ID"
+NET_NS_CMD="ip netns exec $NET_NS"
 
 PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS"
 echo "Running $PING_CMD"
diff --git a/tools/verify-ipv6-only-deployments.sh b/tools/verify-ipv6-only-deployments.sh
index 2596395..0f0cba8 100755
--- a/tools/verify-ipv6-only-deployments.sh
+++ b/tools/verify-ipv6-only-deployments.sh
@@ -23,32 +23,43 @@
     _service_listen_address=$(echo $SERVICE_LISTEN_ADDRESS | tr -d [])
     local _service_local_host=''
     _service_local_host=$(echo $SERVICE_LOCAL_HOST | tr -d [])
+    local _tunnel_endpoint_ip=''
+    _tunnel_endpoint_ip=$(echo $TUNNEL_ENDPOINT_IP | tr -d [])
     if [[ "$SERVICE_IP_VERSION" != 6 ]]; then
         echo $SERVICE_IP_VERSION "SERVICE_IP_VERSION is not set to 6 which is must for devstack to deploy services with IPv6 address."
         exit 1
     fi
+    if [[ "$TUNNEL_IP_VERSION" != 6 ]]; then
+        echo $TUNNEL_IP_VERSION "TUNNEL_IP_VERSION is not set to 6, so TUNNEL_ENDPOINT_IP cannot be an IPv6 address."
+        exit 1
+    fi
     is_service_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_host'"))')
     if [[ "$is_service_host_ipv6" != "True" ]]; then
-        echo $SERVICE_HOST "SERVICE_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+        echo $SERVICE_HOST "SERVICE_HOST is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
         exit 1
     fi
     is_host_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_host_ipv6'"))')
     if [[ "$is_host_ipv6" != "True" ]]; then
-        echo $HOST_IPV6 "HOST_IPV6 is not ipv6 which means devstack cannot deploy services on IPv6 address."
+        echo $HOST_IPV6 "HOST_IPV6 is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
         exit 1
     fi
     is_service_listen_address=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_listen_address'"))')
     if [[ "$is_service_listen_address" != "True" ]]; then
-        echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not ipv6 which means devstack cannot deploy services on IPv6 address."
+        echo $SERVICE_LISTEN_ADDRESS "SERVICE_LISTEN_ADDRESS is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
         exit 1
     fi
     is_service_local_host=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_service_local_host'"))')
     if [[ "$is_service_local_host" != "True" ]]; then
-        echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not ipv6 which means devstack cannot deploy services on IPv6 address."
+        echo $SERVICE_LOCAL_HOST "SERVICE_LOCAL_HOST is not IPv6, which means devstack cannot deploy services on IPv6 addresses."
+        exit 1
+    fi
+    is_tunnel_endpoint_ip=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$_tunnel_endpoint_ip'"))')
+    if [[ "$is_tunnel_endpoint_ip" != "True" ]]; then
+        echo $TUNNEL_ENDPOINT_IP "TUNNEL_ENDPOINT_IP is not IPv6, which means devstack will not deploy with an IPv6 endpoint address."
         exit 1
     fi
     echo "Devstack is properly configured with IPv6"
-    echo "SERVICE_IP_VERSION: " $SERVICE_IP_VERSION "HOST_IPV6: " $HOST_IPV6 "SERVICE_HOST: " $SERVICE_HOST "SERVICE_LISTEN_ADDRESS: " $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST: " $SERVICE_LOCAL_HOST
+    echo "SERVICE_IP_VERSION:" $SERVICE_IP_VERSION "HOST_IPV6:" $HOST_IPV6 "SERVICE_HOST:" $SERVICE_HOST "SERVICE_LISTEN_ADDRESS:" $SERVICE_LISTEN_ADDRESS "SERVICE_LOCAL_HOST:" $SERVICE_LOCAL_HOST "TUNNEL_IP_VERSION:" $TUNNEL_IP_VERSION "TUNNEL_ENDPOINT_IP:" $TUNNEL_ENDPOINT_IP
 }
 
 function sanity_check_system_ipv6_enabled {
@@ -72,7 +83,7 @@
         is_endpoint_ipv6=$(python3 -c 'import oslo_utils.netutils as nutils; print(nutils.is_valid_ipv6("'$endpoint_address'"))')
         if [[ "$is_endpoint_ipv6" != "True" ]]; then
             all_ipv6=False
-            echo $endpoint ": This is not ipv6 endpoint which means corresponding service is not listening on IPv6 address."
+            echo $endpoint ": This is not an IPv6 endpoint, which means the corresponding service is not listening on an IPv6 address."
             continue
         fi
         endpoints_verified=True
@@ -80,7 +91,7 @@
     if [[ "$all_ipv6" == "False"  ]] || [[ "$endpoints_verified" == "False" ]]; then
         exit 1
     fi
-    echo "All services deployed by devstack is on IPv6 endpoints"
+    echo "All services deployed by devstack are on IPv6 endpoints"
     echo $endpoints
 }
 
diff --git a/tools/worlddump.py b/tools/worlddump.py
index e292173..edbfa26 100755
--- a/tools/worlddump.py
+++ b/tools/worlddump.py
@@ -19,7 +19,6 @@
 
 import argparse
 import datetime
-from distutils import spawn
 import fnmatch
 import io
 import os
@@ -52,7 +51,7 @@
 
 
 def filename(dirname, name=""):
-    now = datetime.datetime.utcnow()
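+    # datetime.utcnow() is deprecated as of Python 3.12, so use an explicit
+    # timezone-aware UTC timestamp instead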
+    now = datetime.datetime.now(datetime.timezone.utc)
     fmt = "worlddump-%Y-%m-%d-%H%M%S"
     if name:
         fmt += "-" + name
@@ -76,7 +75,7 @@
 
 
 def _find_cmd(cmd):
-    if not spawn.find_executable(cmd):
+    if not shutil.which(cmd):
         print("*** %s not found: skipping" % cmd)
         return False
     return True
diff --git a/tox.ini b/tox.ini
index ec764ab..26cd68c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -12,7 +12,7 @@
 # against devstack, just set BASHATE_INSTALL_PATH=/path/... to your
 # modified bashate tree
 deps =
-   {env:BASHATE_INSTALL_PATH:bashate==2.0.0}
+   {env:BASHATE_INSTALL_PATH:bashate}
 allowlist_externals = bash
 commands = bash -c "find {toxinidir}             \
          -not \( -type d -name .?\* -prune \)    \
diff --git a/unstack.sh b/unstack.sh
index 813f9a8..1b2d8dd 100755
--- a/unstack.sh
+++ b/unstack.sh
@@ -168,10 +168,12 @@
     cleanup_etcd3
 fi
 
-if is_service_enabled dstat; then
-    stop_dstat
+if is_service_enabled openstack-cli-server; then
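+    # the CLI server runs as a devstack systemd unit, so stop it like any
+    # other devstack service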
+    stop_service devstack@openstack-cli-server
 fi
 
+stop_dstat
+
 # NOTE: Cinder automatically installs the lvm2 package, independently of the
 # enabled backends. So if Cinder is enabled, and installed successfully we are
 # sure lvm2 (lvremove, /etc/lvm/lvm.conf, etc.) is here.
@@ -185,4 +187,4 @@
 # Clean any safe.directory items we wrote into the global
 # gitconfig. We can identify the relevant ones by checking that they
 # point to somewhere in our $DEST directory.
-sudo sed -i "/directory=${DEST}/ d" /etc/gitconfig
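+# Use "+" as the sed address delimiter so the slashes in ${DEST} need no
+# escaping, and match the "directory = " spacing that git writes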
+sudo sed -i "\+directory = ${DEST}+ d" /etc/gitconfig